From b801d001e812778b1547352468d5f243b7070994 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 25 Apr 2024 10:47:46 +0200 Subject: [PATCH 01/27] Contracts: Stabilize XCM host fns (#4213) See https://github.com/paritytech/ink/pull/1912 https://github.com/paritytech/ink-docs/pull/338 --- prdoc/pr_4213.prdoc | 11 +++++++++++ substrate/frame/contracts/src/lib.rs | 4 ++-- substrate/frame/contracts/src/wasm/runtime.rs | 2 -- 3 files changed, 13 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_4213.prdoc diff --git a/prdoc/pr_4213.prdoc b/prdoc/pr_4213.prdoc new file mode 100644 index 000000000000..ce7eb65969b0 --- /dev/null +++ b/prdoc/pr_4213.prdoc @@ -0,0 +1,11 @@ +title: "[pallet-contracts] stabilize xcm_send and xcm_execute" + +doc: + - audience: Runtime Dev + description: | + `xcm_send` and `xcm_execute` are currently marked as unstable. This PR stabilizes them. +crates: +- name: pallet-contracts + bump: major + + diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 20cf7d1651cc..0045d72141c9 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -223,14 +223,14 @@ pub struct Environment { pub struct ApiVersion(u16); impl Default for ApiVersion { fn default() -> Self { - Self(2) + Self(3) } } #[test] fn api_version_is_up_to_date() { assert_eq!( - 109, + 111, crate::wasm::STABLE_API_COUNT, "Stable API count has changed. Bump the returned value of ApiVersion::default() and update the test." ); diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 160dfa0d2f36..52ceda99edb7 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -2104,7 +2104,6 @@ pub mod env { /// Execute an XCM program locally, using the contract's address as the origin. /// See [`pallet_contracts_uapi::HostFn::execute_xcm`]. - #[unstable] fn xcm_execute( ctx: _, memory: _, @@ -2143,7 +2142,6 @@ pub mod env { /// Send an XCM program from the contract to the specified destination. /// See [`pallet_contracts_uapi::HostFn::send_xcm`]. - #[unstable] fn xcm_send( ctx: _, memory: _, From 077041788070eddc6f3c1043b9cb6146585b1469 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 25 Apr 2024 12:01:21 +0300 Subject: [PATCH 02/27] [XCM] Treat recursion limit error as transient in the MQ (#4202) Changes: - Add new error variant `ProcessMessageError::StackLimitReached` and treat XCM error `ExceedsStackLimit` as such. 
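In code terms, the gist of the change to `ProcessXcmMessage` is the following mapping (a condensed, illustrative excerpt of the diff below; the helper name is only for presentation — the real code performs the match inline and also logs the error):

```rust
use frame_support::traits::ProcessMessageError;

// The executor's stack-limit error is now surfaced as the new, transient
// `StackLimitReached` variant instead of the permanent `Unsupported`, so the
// message queue keeps the message around and may retry it later.
fn map_xcm_error(error: xcm::latest::Error) -> ProcessMessageError {
    match error {
        xcm::latest::Error::ExceedsStackLimit => ProcessMessageError::StackLimitReached,
        _ => ProcessMessageError::Unsupported,
    }
}
```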
--------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Branislav Kontur --- .../xcm-builder/src/process_xcm_message.rs | 46 +++++++- polkadot/xcm/xcm-executor/src/lib.rs | 7 ++ prdoc/pr_4202.prdoc | 16 +++ substrate/frame/message-queue/src/lib.rs | 16 ++- substrate/frame/message-queue/src/mock.rs | 3 + substrate/frame/message-queue/src/tests.rs | 101 +++++++++++++++++- .../frame/support/src/traits/messages.rs | 4 + 7 files changed, 187 insertions(+), 6 deletions(-) create mode 100644 prdoc/pr_4202.prdoc diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index bcf91d8e68c3..7760274f6e24 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -102,7 +102,12 @@ impl< target: LOG_TARGET, "XCM message execution error: {error:?}", ); - (required, Err(ProcessMessageError::Unsupported)) + let error = match error { + xcm::latest::Error::ExceedsStackLimit => ProcessMessageError::StackLimitReached, + _ => ProcessMessageError::Unsupported, + }; + + (required, Err(error)) }, }; meter.consume(consumed); @@ -148,6 +153,45 @@ mod tests { } } + #[test] + fn process_message_exceeds_limits_fails() { + struct MockedExecutor; + impl ExecuteXcm<()> for MockedExecutor { + type Prepared = xcm_executor::WeighedMessage<()>; + fn prepare( + message: xcm::latest::Xcm<()>, + ) -> core::result::Result> { + Ok(xcm_executor::WeighedMessage::new(Weight::zero(), message)) + } + fn execute( + _: impl Into, + _: Self::Prepared, + _: &mut XcmHash, + _: Weight, + ) -> Outcome { + Outcome::Error { error: xcm::latest::Error::ExceedsStackLimit } + } + fn charge_fees(_location: impl Into, _fees: Assets) -> xcm::latest::Result { + unreachable!() + } + } + + type Processor = ProcessXcmMessage; + + let xcm = VersionedXcm::V4(xcm::latest::Xcm::<()>(vec![ + xcm::latest::Instruction::<()>::ClearOrigin, + ])); + assert_err!( + Processor::process_message( + &xcm.encode(), + ORIGIN, + &mut WeightMeter::new(), + &mut [0; 32] + ), + ProcessMessageError::StackLimitReached, + ); + } + #[test] fn process_message_overweight_fails() { for msg in [v3_xcm(true), v3_xcm(false), v3_xcm(false), v2_xcm(false)] { diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e673a46c4ac6..a7052328da00 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -182,6 +182,13 @@ impl PreparedMessage for WeighedMessage { } } +#[cfg(any(test, feature = "std"))] +impl WeighedMessage { + pub fn new(weight: Weight, message: Xcm) -> Self { + Self(weight, message) + } +} + impl ExecuteXcm for XcmExecutor { type Prepared = WeighedMessage; fn prepare( diff --git a/prdoc/pr_4202.prdoc b/prdoc/pr_4202.prdoc new file mode 100644 index 000000000000..6469c3c78407 --- /dev/null +++ b/prdoc/pr_4202.prdoc @@ -0,0 +1,16 @@ +title: "Treat XCM ExceedsStackLimit errors as transient in the MQ pallet" + +doc: + - audience: Runtime User + description: | + Fixes an issue where the MessageQueue can incorrectly assume that a message will permanently fail to process and disallow retrial of it. 
+ +crates: + - name: frame-support + bump: major + - name: pallet-message-queue + bump: patch + - name: staging-xcm-builder + bump: patch + - name: staging-xcm-executor + bump: patch diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index ec85c785f79e..ef3420d21be5 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -765,6 +765,13 @@ enum MessageExecutionStatus { Processed, /// The message was processed and resulted in a, possibly permanent, error. Unprocessable { permanent: bool }, + /// The stack depth limit was reached. + /// + /// We cannot just return `Unprocessable` in this case, because the processability of the + /// message depends on how the function was called. This may be a permanent error if it was + /// called by a top-level function, or a transient error if it was already called in a nested + /// function. + StackLimitReached, } impl Pallet { @@ -984,7 +991,8 @@ impl Pallet { // additional overweight event being deposited. ) { Overweight | InsufficientWeight => Err(Error::::InsufficientWeight), - Unprocessable { permanent: false } => Err(Error::::TemporarilyUnprocessable), + StackLimitReached | Unprocessable { permanent: false } => + Err(Error::::TemporarilyUnprocessable), Unprocessable { permanent: true } | Processed => { page.note_processed_at_pos(pos); book_state.message_count.saturating_dec(); @@ -1250,7 +1258,7 @@ impl Pallet { let is_processed = match res { InsufficientWeight => return ItemExecutionStatus::Bailed, Unprocessable { permanent: false } => return ItemExecutionStatus::NoProgress, - Processed | Unprocessable { permanent: true } => true, + Processed | Unprocessable { permanent: true } | StackLimitReached => true, Overweight => false, }; @@ -1461,6 +1469,10 @@ impl Pallet { Self::deposit_event(Event::::ProcessingFailed { id: id.into(), origin, error }); MessageExecutionStatus::Unprocessable { permanent: true } }, + Err(error @ StackLimitReached) => { + Self::deposit_event(Event::::ProcessingFailed { id: id.into(), origin, error }); + MessageExecutionStatus::StackLimitReached + }, Ok(success) => { // Success let weight_used = meter.consumed().saturating_sub(prev_consumed); diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 1281de6b0a66..66a242d5a18f 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -198,6 +198,7 @@ impl ProcessMessage for RecordingMessageProcessor { parameter_types! { pub static Callback: Box = Box::new(|_, _| {}); + pub static IgnoreStackOvError: bool = false; } /// Processed a mocked message. 
Messages that end with `badformat`, `corrupt`, `unsupported` or @@ -216,6 +217,8 @@ fn processing_message(msg: &[u8], origin: &MessageOrigin) -> Result<(), ProcessM Err(ProcessMessageError::Unsupported) } else if msg.ends_with("yield") { Err(ProcessMessageError::Yield) + } else if msg.ends_with("stacklimitreached") && !IgnoreStackOvError::get() { + Err(ProcessMessageError::StackLimitReached) } else { Ok(()) } diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index d6788847d571..e89fdb8b3208 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -174,9 +174,10 @@ fn service_queues_failing_messages_works() { MessageQueue::enqueue_message(msg("badformat"), Here); MessageQueue::enqueue_message(msg("corrupt"), Here); MessageQueue::enqueue_message(msg("unsupported"), Here); + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); MessageQueue::enqueue_message(msg("yield"), Here); // Starts with four pages. - assert_pages(&[0, 1, 2, 3]); + assert_pages(&[0, 1, 2, 3, 4]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( @@ -206,9 +207,9 @@ fn service_queues_failing_messages_works() { .into(), ); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(System::events().len(), 3); + assert_eq!(System::events().len(), 4); // Last page with the `yield` stays in. - assert_pages(&[3]); + assert_pages(&[4]); }); } @@ -1880,3 +1881,97 @@ fn process_enqueued_on_idle_requires_enough_weight() { assert_eq!(MessagesProcessed::take(), vec![]); }) } + +/// A message that reports `StackLimitReached` will not be put into the overweight queue when +/// executed from the top level. +#[test] +fn process_discards_stack_ov_message() { + use MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + + MessageQueue::service_queues(10.into_weight()); + + assert_last_event::( + Event::ProcessingFailed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + error: ProcessMessageError::StackLimitReached, + } + .into(), + ); + + assert!(MessagesProcessed::take().is_empty()); + // Message is gone and not overweight: + assert_pages(&[]); + }); +} + +/// A message that reports `StackLimitReached` will stay in the overweight queue when it is executed +/// by `execute_overweight`. +#[test] +fn execute_overweight_keeps_stack_ov_message() { + use MessageOrigin::*; + build_and_execute::(|| { + // We need to create a mocked message that first reports insufficient weight, and then + // `StackLimitReached`: + IgnoreStackOvError::set(true); + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + MessageQueue::service_queues(0.into_weight()); + + assert_last_event::( + Event::OverweightEnqueued { + id: blake2_256(b"stacklimitreached"), + origin: MessageOrigin::Here, + message_index: 0, + page_index: 0, + } + .into(), + ); + // Does not count as 'processed': + assert!(MessagesProcessed::take().is_empty()); + assert_pages(&[0]); + + // Now let it return `StackLimitReached`. Note that this case would normally not happen, + // since we assume that the top-level execution is the one with the most remaining stack + // depth. + IgnoreStackOvError::set(false); + // Ensure that trying to execute the message does not change any state (besides events). 
+ System::reset_events(); + let storage_noop = StorageNoopGuard::new(); + assert_eq!( + ::execute_overweight(3.into_weight(), (Here, 0, 0)), + Err(ExecuteOverweightError::Other) + ); + assert_last_event::( + Event::ProcessingFailed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + error: ProcessMessageError::StackLimitReached, + } + .into(), + ); + System::reset_events(); + drop(storage_noop); + + // Now let's process it normally: + IgnoreStackOvError::set(true); + assert_eq!( + ::execute_overweight(1.into_weight(), (Here, 0, 0)) + .unwrap(), + 1.into_weight() + ); + + assert_last_event::( + Event::Processed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + weight_used: 1.into_weight(), + success: true, + } + .into(), + ); + assert_pages(&[]); + System::reset_events(); + }); +} diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index f3d893bcc1d8..2eec606b6d18 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -46,6 +46,8 @@ pub enum ProcessMessageError { /// the case that a queue is re-serviced within the same block after *yielding*. A queue is /// not required to *yield* again when it is being re-serviced withing the same block. Yield, + /// The message could not be processed for reaching the stack depth limit. + StackLimitReached, } /// Can process messages from a specific origin. @@ -96,6 +98,8 @@ pub trait ServiceQueues { /// - `weight_limit`: The maximum amount of dynamic weight that this call can use. /// /// Returns the dynamic weight used by this call; is never greater than `weight_limit`. + /// Should only be called in top-level runtime entry points like `on_initialize` or `on_idle`. + /// Otherwise, stack depth limit errors may be miss-handled. fn service_queues(weight_limit: Weight) -> Weight; /// Executes a message that could not be executed by [`Self::service_queues()`] because it was From 239a23d9cc712aab8c0a87eab7e558e5a149fd42 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 25 Apr 2024 13:11:07 +0300 Subject: [PATCH 03/27] Fix polkadot parachains not producing blocks until next session (#4269) ... a few sessions too late :(, this already happened on polkadot, so as of now there are no known relay-chains without async backing enabled in runtime, but let's fix it in case someone else wants to repeat our steps. Fixes: https://github.com/paritytech/polkadot-sdk/issues/4226 --------- Signed-off-by: Alexandru Gheorghe --- .../node/network/statement-distribution/src/v2/mod.rs | 11 ++++++++++- .../statement-distribution/src/v2/tests/mod.rs | 6 ++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 68caa5f0e700..118e34e92063 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -826,7 +826,16 @@ pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { // clean up sessions based on everything remaining. 
let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); state.per_session.retain(|s, _| sessions.contains(s)); - state.unused_topologies.retain(|s, _| sessions.contains(s)); + + let last_session_index = state.unused_topologies.keys().max().copied(); + // Do not clean-up the last saved toplogy unless we moved to the next session + // This is needed because handle_deactive_leaves, gets also called when + // prospective_parachains APIs are not present, so we would actually remove + // the topology without using it because `per_relay_parent` is empty until + // prospective_parachains gets enabled + state + .unused_topologies + .retain(|s, _| sessions.contains(s) || last_session_index == Some(*s)); } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 8dda7219cd12..3d987d3fc433 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -509,6 +509,12 @@ async fn setup_test_and_connect_peers( // Send gossip topology and activate leaf. if send_topology_before_leaf { send_new_topology(overseer, state.make_dummy_topology()).await; + // Send cleaning up of a leaf to make sure it does not clear the save topology as well. + overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(Hash::random()), + ))) + .await; activate_leaf(overseer, &test_leaf, &state, true, vec![]).await; } else { activate_leaf(overseer, &test_leaf, &state, true, vec![]).await; From c26cf3f6f2d2b7f7783703308ece440c338459f8 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Thu, 25 Apr 2024 12:16:12 +0200 Subject: [PATCH 04/27] Do not re-prepare PVFs if not needed (#4211) Currently, PVFs are re-prepared if any execution environment parameter changes. As we've recently seen on Kusama and Polkadot, that may lead to a severe finality lag because every validator has to re-prepare every PVF. That cannot be avoided altogether; however, we could cease re-preparing PVFs when a change in the execution environment can't lead to a change in the artifact itself. For example, it's clear that changing the execution timeout cannot affect the artifact. In this PR, I'm introducing a separate hash for the subset of execution environment parameters that changes only if a preparation-related parameter changes. It introduces some minor code duplication, but without that, the scope of changes would be much bigger. TODO: - [x] Add a test to ensure the artifact is not re-prepared if non-preparation-related parameter is changed - [x] Add a test to ensure the artifact is re-prepared if a preparation-related parameter is changed - [x] Add comments, warnings, and, possibly, a test to ensure a new parameter ever added to the executor environment parameters will be evaluated by the author of changes with respect to its artifact preparation impact and added to the new hash preimage if needed. 
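To illustrate the resulting behaviour, a hypothetical usage sketch (not part of this patch; it only relies on the `prep_hash()`/`hash()` API added below):

```rust
use polkadot_primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind};

#[test]
fn prep_hash_ignores_execution_only_params() {
    let base = ExecutorParams::default();
    // An execution-time knob changes: the preparation hash (and thus the artifact) stays the same.
    let exec_only =
        ExecutorParams::from(&[ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2500)][..]);
    // A preparation-time knob changes: the preparation hash changes, forcing re-preparation.
    let prep_change =
        ExecutorParams::from(&[ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 60_000)][..]);

    assert_eq!(base.prep_hash(), exec_only.prep_hash());
    assert_ne!(base.prep_hash(), prep_change.prep_hash());
    // The full parameter hash still distinguishes every change.
    assert_ne!(base.hash(), exec_only.hash());
}
```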
Closes #4132 --- polkadot/node/core/pvf/src/artifacts.rs | 19 ++-- polkadot/node/core/pvf/tests/it/main.rs | 72 +++++++++++++- polkadot/primitives/src/lib.rs | 2 +- polkadot/primitives/src/v7/executor_params.rs | 99 +++++++++++++++++++ polkadot/primitives/src/v7/mod.rs | 4 +- prdoc/pr_4211.prdoc | 15 +++ 6 files changed, 201 insertions(+), 10 deletions(-) create mode 100644 prdoc/pr_4211.prdoc diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 6288755526d4..a3a48b61acb1 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -58,7 +58,7 @@ use crate::{host::PrecheckResultSender, worker_interface::WORKER_DIR_PREFIX}; use always_assert::always; use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; -use polkadot_primitives::ExecutorParamsHash; +use polkadot_primitives::ExecutorParamsPrepHash; use std::{ collections::HashMap, fs, @@ -85,22 +85,27 @@ pub fn generate_artifact_path(cache_path: &Path) -> PathBuf { artifact_path } -/// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set. +/// Identifier of an artifact. Encodes a code hash of the PVF and a hash of preparation-related +/// executor parameter set. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ArtifactId { pub(crate) code_hash: ValidationCodeHash, - pub(crate) executor_params_hash: ExecutorParamsHash, + pub(crate) executor_params_prep_hash: ExecutorParamsPrepHash, } impl ArtifactId { /// Creates a new artifact ID with the given hash. - pub fn new(code_hash: ValidationCodeHash, executor_params_hash: ExecutorParamsHash) -> Self { - Self { code_hash, executor_params_hash } + pub fn new( + code_hash: ValidationCodeHash, + executor_params_prep_hash: ExecutorParamsPrepHash, + ) -> Self { + Self { code_hash, executor_params_prep_hash } } - /// Returns an artifact ID that corresponds to the PVF with given executor params. + /// Returns an artifact ID that corresponds to the PVF with given preparation-related + /// executor parameters. 
pub fn from_pvf_prep_data(pvf: &PvfPrepData) -> Self { - Self::new(pvf.code_hash(), pvf.executor_params().hash()) + Self::new(pvf.code_hash(), pvf.executor_params().prep_hash()) } } diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 56cc681aff38..6961b93832ab 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -26,7 +26,7 @@ use polkadot_node_core_pvf::{ ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; -use polkadot_primitives::{ExecutorParam, ExecutorParams}; +use polkadot_primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; use std::{io::Write, time::Duration}; use tokio::sync::Mutex; @@ -559,3 +559,73 @@ async fn nonexistent_cache_dir() { .await .unwrap(); } + +// Checks the the artifact is not re-prepared when the executor environment parameters change +// in a way not affecting the preparation +#[tokio::test] +async fn artifact_does_not_reprepare_on_non_meaningful_exec_parameter_change() { + let host = TestHost::new_with_config(|cfg| { + cfg.prepare_workers_hard_max_num = 1; + }) + .await; + let cache_dir = host.cache_dir.path(); + + let set1 = ExecutorParams::default(); + let set2 = + ExecutorParams::from(&[ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2500)][..]); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap(); + + let md1 = { + let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + assert_eq!(cache_dir.len(), 2); + let mut artifact_path = cache_dir.pop().unwrap().unwrap(); + if artifact_path.path().is_dir() { + artifact_path = cache_dir.pop().unwrap().unwrap(); + } + std::fs::metadata(artifact_path.path()).unwrap() + }; + + // FS times are not monotonical so we wait 2 secs here to be sure that the creation time of the + // second attifact will be different + tokio::time::sleep(Duration::from_secs(2)).await; + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap(); + + let md2 = { + let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + assert_eq!(cache_dir.len(), 2); + let mut artifact_path = cache_dir.pop().unwrap().unwrap(); + if artifact_path.path().is_dir() { + artifact_path = cache_dir.pop().unwrap().unwrap(); + } + std::fs::metadata(artifact_path.path()).unwrap() + }; + + assert_eq!(md1.created().unwrap(), md2.created().unwrap()); +} + +// Checks if the artifact is re-prepared if the re-preparation is needed by the nature of +// the execution environment parameters change +#[tokio::test] +async fn artifact_does_reprepare_on_meaningful_exec_parameter_change() { + let host = TestHost::new_with_config(|cfg| { + cfg.prepare_workers_hard_max_num = 1; + }) + .await; + let cache_dir = host.cache_dir.path(); + + let set1 = ExecutorParams::default(); + let set2 = + ExecutorParams::from(&[ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 60000)][..]); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap(); + let cache_dir_contents: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + + assert_eq!(cache_dir_contents.len(), 2); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap(); + let cache_dir_contents: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + + assert_eq!(cache_dir_contents.len(), 3); // new artifact has been added +} diff --git a/polkadot/primitives/src/lib.rs 
b/polkadot/primitives/src/lib.rs index d4eeb3cc3d29..01f393086a66 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -44,7 +44,7 @@ pub use v7::{ CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, - ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, + ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, diff --git a/polkadot/primitives/src/v7/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs index 1e19f3b23fec..918a7f17a7e3 100644 --- a/polkadot/primitives/src/v7/executor_params.rs +++ b/polkadot/primitives/src/v7/executor_params.rs @@ -152,13 +152,42 @@ impl sp_std::fmt::LowerHex for ExecutorParamsHash { } } +/// Unit type wrapper around [`type@Hash`] that represents a hash of preparation-related +/// executor parameters. +/// +/// This type is produced by [`ExecutorParams::prep_hash`]. +#[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, PartialOrd, Ord, TypeInfo)] +pub struct ExecutorParamsPrepHash(Hash); + +impl sp_std::fmt::Display for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + self.0.fmt(f) + } +} + +impl sp_std::fmt::Debug for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl sp_std::fmt::LowerHex for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + sp_std::fmt::LowerHex::fmt(&self.0, f) + } +} + /// # Deterministically serialized execution environment semantics /// Represents an arbitrary semantics of an arbitrary execution environment, so should be kept as /// abstract as possible. +// // ADR: For mandatory entries, mandatoriness should be enforced in code rather than separating them // into individual fields of the structure. Thus, complex migrations shall be avoided when adding // new entries and removing old ones. At the moment, there's no mandatory parameters defined. If // they show up, they must be clearly documented as mandatory ones. +// +// !!! Any new parameter that does not affect the prepared artifact must be added to the exclusion +// !!! list in `prep_hash()` to avoid unneccessary artifact rebuilds. #[derive( Clone, Debug, Default, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize, )] @@ -175,6 +204,28 @@ impl ExecutorParams { ExecutorParamsHash(BlakeTwo256::hash(&self.encode())) } + /// Returns hash of preparation-related executor parameters + pub fn prep_hash(&self) -> ExecutorParamsPrepHash { + use ExecutorParam::*; + + let mut enc = b"prep".to_vec(); + + self.0 + .iter() + .flat_map(|param| match param { + MaxMemoryPages(..) => None, + StackLogicalMax(..) => Some(param), + StackNativeMax(..) => None, + PrecheckingMaxMemory(..) => None, + PvfPrepTimeout(..) => Some(param), + PvfExecTimeout(..) 
=> None, + WasmExtBulkMemory => Some(param), + }) + .for_each(|p| enc.extend(p.encode())); + + ExecutorParamsPrepHash(BlakeTwo256::hash(&enc)) + } + /// Returns a PVF preparation timeout, if any pub fn pvf_prep_timeout(&self, kind: PvfPrepKind) -> Option { for param in &self.0 { @@ -336,3 +387,51 @@ impl From<&[ExecutorParam]> for ExecutorParams { ExecutorParams(arr.to_vec()) } } + +// This test ensures the hash generated by `prep_hash()` changes if any preparation-related +// executor parameter changes. If you're adding a new executor parameter, you must add it into +// this test, and if changing that parameter may not affect the artifact produced on the +// preparation step, it must be added to the list of exlusions in `pre_hash()` as well. +// See also `prep_hash()` comments. +#[test] +fn ensure_prep_hash_changes() { + use ExecutorParam::*; + let ep = ExecutorParams::from( + &[ + MaxMemoryPages(0), + StackLogicalMax(0), + StackNativeMax(0), + PrecheckingMaxMemory(0), + PvfPrepTimeout(PvfPrepKind::Precheck, 0), + PvfPrepTimeout(PvfPrepKind::Prepare, 0), + PvfExecTimeout(PvfExecKind::Backing, 0), + PvfExecTimeout(PvfExecKind::Approval, 0), + WasmExtBulkMemory, + ][..], + ); + + for p in ep.iter() { + let (ep1, ep2) = match p { + MaxMemoryPages(_) => continue, + StackLogicalMax(_) => ( + ExecutorParams::from(&[StackLogicalMax(1)][..]), + ExecutorParams::from(&[StackLogicalMax(2)][..]), + ), + StackNativeMax(_) => continue, + PrecheckingMaxMemory(_) => continue, + PvfPrepTimeout(PvfPrepKind::Precheck, _) => ( + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Precheck, 1)][..]), + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Precheck, 2)][..]), + ), + PvfPrepTimeout(PvfPrepKind::Prepare, _) => ( + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Prepare, 1)][..]), + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Prepare, 2)][..]), + ), + PvfExecTimeout(_, _) => continue, + WasmExtBulkMemory => + (ExecutorParams::default(), ExecutorParams::from(&[WasmExtBulkMemory][..])), + }; + + assert_ne!(ep1.prep_hash(), ep2.prep_hash()); + } +} diff --git a/polkadot/primitives/src/v7/mod.rs b/polkadot/primitives/src/v7/mod.rs index 5647bfe68d56..8a059408496c 100644 --- a/polkadot/primitives/src/v7/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -62,7 +62,9 @@ pub mod executor_params; pub mod slashing; pub use async_backing::AsyncBackingParams; -pub use executor_params::{ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash}; +pub use executor_params::{ + ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash, +}; mod metrics; pub use metrics::{ diff --git a/prdoc/pr_4211.prdoc b/prdoc/pr_4211.prdoc new file mode 100644 index 000000000000..161dc8485e83 --- /dev/null +++ b/prdoc/pr_4211.prdoc @@ -0,0 +1,15 @@ +title: "Re-prepare PVF artifacts only if needed" + +doc: + - audience: Node Dev + description: | + When a change in the executor environment parameters can not affect the prepared artifact, + it is preserved without recompilation and used for future executions. That mitigates + situations where every unrelated executor parameter change resulted in re-preparing every + artifact on every validator, causing a significant finality lag. 
+ +crates: + - name: polkadot-node-core-pvf + bump: minor + - name: polkadot-primitives + bump: minor From ff2b178206f9952c3337638659450c67fd700e7e Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 25 Apr 2024 22:01:05 +1000 Subject: [PATCH 05/27] remote-externalities: retry get child keys query (#4280) --- .../frame/remote-externalities/src/lib.rs | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index e429d39669f1..58cb901470c1 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -830,19 +830,22 @@ where child_prefix: StorageKey, at: B::Hash, ) -> Result, &'static str> { - // This is deprecated and will generate a warning which causes the CI to fail. - #[allow(warnings)] - let child_keys = substrate_rpc_client::ChildStateApi::storage_keys( - client, - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - child_prefix, - Some(at), - ) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc child_get_keys failed." - })?; + let retry_strategy = + FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); + let get_child_keys_closure = || { + #[allow(deprecated)] + substrate_rpc_client::ChildStateApi::storage_keys( + client, + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + child_prefix.clone(), + Some(at), + ) + }; + let child_keys = + Retry::spawn(retry_strategy, get_child_keys_closure).await.map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." + })?; debug!( target: LOG_TARGET, From c9923cd7feb9e7c6337f0942abd3279468df5559 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Thu, 25 Apr 2024 16:52:24 +0300 Subject: [PATCH 06/27] rename fragment_tree folder to fragment_chain (#4294) Makes https://github.com/paritytech/polkadot-sdk/pull/4035 easier to review --- .../src/{fragment_tree => fragment_chain}/mod.rs | 0 .../src/{fragment_tree => fragment_chain}/tests.rs | 0 polkadot/node/core/prospective-parachains/src/lib.rs | 12 ++++++------ 3 files changed, 6 insertions(+), 6 deletions(-) rename polkadot/node/core/prospective-parachains/src/{fragment_tree => fragment_chain}/mod.rs (100%) rename polkadot/node/core/prospective-parachains/src/{fragment_tree => fragment_chain}/tests.rs (100%) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs similarity index 100% rename from polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs rename to polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs similarity index 100% rename from polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs rename to polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index f5d50fb74fac..0b1a2e034a28 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -55,13 +55,13 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, - fragment_tree::{ + 
fragment_chain::{ CandidateStorage, CandidateStorageInsertionError, FragmentTree, Scope as TreeScope, }, }; mod error; -mod fragment_tree; +mod fragment_chain; #[cfg(test)] mod tests; @@ -349,7 +349,7 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { struct ImportablePendingAvailability { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - compact: crate::fragment_tree::PendingAvailability, + compact: crate::fragment_chain::PendingAvailability, } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -394,7 +394,7 @@ async fn preprocess_candidates_pending_availability( relay_parent_number: relay_parent.number, relay_parent_storage_root: relay_parent.storage_root, }, - compact: crate::fragment_tree::PendingAvailability { + compact: crate::fragment_chain::PendingAvailability { candidate_hash: pending.candidate_hash, relay_parent, }, @@ -675,7 +675,7 @@ fn answer_hypothetical_frontier_request( let candidate_hash = c.candidate_hash(); let hypothetical = match c { HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } => - fragment_tree::HypotheticalCandidate::Complete { + fragment_chain::HypotheticalCandidate::Complete { receipt: Cow::Borrowed(receipt), persisted_validation_data: Cow::Borrowed(persisted_validation_data), }, @@ -683,7 +683,7 @@ fn answer_hypothetical_frontier_request( parent_head_data_hash, candidate_relay_parent, .. - } => fragment_tree::HypotheticalCandidate::Incomplete { + } => fragment_chain::HypotheticalCandidate::Incomplete { relay_parent: *candidate_relay_parent, parent_head_data_hash: *parent_head_data_hash, }, From 8f5c8f735af9048b83957821db7fb363e89e919f Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 25 Apr 2024 17:04:20 +0200 Subject: [PATCH 07/27] Update approval-voting banchmarks base values (#4283) --- .../benches/approval-voting-regression-bench.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 7157362a79c7..9a5f0d29dbd3 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -77,12 +77,12 @@ fn main() -> Result<(), String> { // We expect no variance for received and sent // but use 0.001 because we operate with floats messages.extend(average_usage.check_network_usage(&[ - ("Received from peers", 52944.7000, 0.001), - ("Sent to peers", 63532.2000, 0.001), + ("Received from peers", 52942.4600, 0.001), + ("Sent to peers", 63547.0330, 0.001), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("approval-distribution", 7.7883, 0.1), - ("approval-voting", 10.4655, 0.1), + ("approval-distribution", 7.0317, 0.1), + ("approval-voting", 9.5751, 0.1), ])); if messages.is_empty() { From dd5b06e622c6c5c301a1554286ec1f4995c7daca Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 25 Apr 2024 17:06:37 +0200 Subject: [PATCH 08/27] [subsystem-benchmarks] Log standart deviation for subsystem-benchmarks (#4285) Should help us to understand more what's happening between individual runs and possibly adjust the number of runs --- polkadot/node/subsystem-bench/src/lib/usage.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index 
59296746ec3d..bfaac3265a2e 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -161,6 +161,13 @@ impl ResourceUsage { for (resource_name, values) in by_name { let total = values.iter().map(|v| v.total).sum::() / values.len() as f64; let per_block = values.iter().map(|v| v.per_block).sum::() / values.len() as f64; + let per_block_sd = + standard_deviation(&values.iter().map(|v| v.per_block).collect::>()); + println!( + "[{}] standart_deviation {:.2}%", + resource_name, + per_block_sd / per_block * 100.0 + ); average.push(Self { resource_name, total, per_block }); } average @@ -179,3 +186,11 @@ pub struct ChartItem { pub unit: String, pub value: f64, } + +fn standard_deviation(values: &[f64]) -> f64 { + let n = values.len() as f64; + let mean = values.iter().sum::() / n; + let variance = values.iter().map(|v| (v - mean).powi(2)).sum::() / (n - 1.0); + + variance.sqrt() +} From 8f8c49deffe56567ba5cde0e1047de15b660bb0e Mon Sep 17 00:00:00 2001 From: Noah Jelich <12912633+njelich@users.noreply.github.com> Date: Fri, 26 Apr 2024 09:03:53 +0200 Subject: [PATCH 09/27] Fix bad links (#4231) The solochain template links to parachain template instead of solochain. --- templates/solochain/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/solochain/README.md b/templates/solochain/README.md index 6390c9524ce1..37c65797dcb0 100644 --- a/templates/solochain/README.md +++ b/templates/solochain/README.md @@ -4,10 +4,10 @@ A fresh [Substrate](https://substrate.io/) node, ready for hacking :rocket: A standalone version of this template is available for each release of Polkadot in the [Substrate Developer Hub Parachain -Template](https://github.com/substrate-developer-hub/substrate-parachain-template/) +Template](https://github.com/substrate-developer-hub/substrate-node-template/) repository. The parachain template is generated directly at each Polkadot -release branch from the [Node Template in -Substrate](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template) +release branch from the [Solochain Template in +Substrate](https://github.com/paritytech/polkadot-sdk/tree/master/templates/solochain) upstream It is usually best to use the stand-alone version to start a new project. All From e8f7c81db66abb40802c582c22041aa63c78ddff Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 26 Apr 2024 11:16:03 +0300 Subject: [PATCH 10/27] [balances] Safeguard against consumer ref underflow (#3865) There are some accounts that do not have a consumer ref while having a reserve. This adds a fail-safe mechanism to trigger in the case that `does_consume` is true, but the assumption of `consumer>0` is not. This should prevent those accounts from loosing balance and the TI from getting messed up even more, but is not an "ideal" fix. TBH an ideal fix is not possible, since on-chain data is in an invalid state. 
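Condensed to its core, the failsafe added to the account mutation path of `pallet-balances` looks roughly like this (an excerpt of the change below, with the surrounding provider/consumer bookkeeping omitted):

```rust
// If the account's new state requires a consumer reference (e.g. it still has
// reserved balance) but, due to historic state, none is currently held, take one
// defensively instead of allowing the account to be reaped, which would make its
// reserved balance vanish and leave the Total Issuance inconsistent.
if !did_consume && does_consume {
    frame_system::Pallet::<T>::inc_consumers(who)?;
}
if does_consume && frame_system::Pallet::<T>::consumers(who) == 0 {
    // Failsafe: should not trigger for accounts created under the current logic.
    log::error!(target: LOG_TARGET, "Defensively bumping a consumer ref.");
    frame_system::Pallet::<T>::inc_consumers(who)?;
}
```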
--------- Signed-off-by: Oliver Tale-Yazdi --- prdoc/pr_3865.prdoc | 11 ++ substrate/frame/balances/Cargo.toml | 1 + substrate/frame/balances/src/lib.rs | 7 ++ .../frame/balances/src/tests/general_tests.rs | 111 ++++++++++++++++++ substrate/frame/balances/src/tests/mod.rs | 20 +++- substrate/frame/balances/src/types.rs | 2 +- 6 files changed, 150 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_3865.prdoc create mode 100644 substrate/frame/balances/src/tests/general_tests.rs diff --git a/prdoc/pr_3865.prdoc b/prdoc/pr_3865.prdoc new file mode 100644 index 000000000000..8e39c04825b1 --- /dev/null +++ b/prdoc/pr_3865.prdoc @@ -0,0 +1,11 @@ +title: "Balances: add failsafe for consumer ref underflow" + +doc: + - audience: Runtime Dev + description: | + Pallet balances now handles the case that historic accounts violate a invariant that they should have a consumer ref on `reserved > 0` balance. + This disallows such accounts from reaping and should prevent TI from getting messed up even more. + +crates: + - name: pallet-balances + bump: patch diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 28eabdaf5062..1cc9ac5d8fd2 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -28,6 +28,7 @@ docify = "0.2.8" [dev-dependencies] pallet-transaction-payment = { path = "../transaction-payment" } +frame-support = { path = "../support", features = ["experimental"] } sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } paste = "1.0.12" diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 685b12499ac0..bd811955d63c 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -954,6 +954,13 @@ pub mod pallet { if !did_consume && does_consume { frame_system::Pallet::::inc_consumers(who)?; } + if does_consume && frame_system::Pallet::::consumers(who) == 0 { + // NOTE: This is a failsafe and should not happen for normal accounts. A normal + // account should have gotten a consumer ref in `!did_consume && does_consume` + // at some point. + log::error!(target: LOG_TARGET, "Defensively bumping a consumer ref."); + frame_system::Pallet::::inc_consumers(who)?; + } if did_provide && !does_provide { // This could reap the account so must go last. frame_system::Pallet::::dec_providers(who).map_err(|r| { diff --git a/substrate/frame/balances/src/tests/general_tests.rs b/substrate/frame/balances/src/tests/general_tests.rs new file mode 100644 index 000000000000..0f3e015d0a89 --- /dev/null +++ b/substrate/frame/balances/src/tests/general_tests.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg(test)] + +use crate::{ + system::AccountInfo, + tests::{ensure_ti_valid, Balances, ExtBuilder, System, Test, TestId, UseSystem}, + AccountData, ExtraFlags, TotalIssuance, +}; +use frame_support::{ + assert_noop, assert_ok, hypothetically, + traits::{ + fungible::{Mutate, MutateHold}, + tokens::Precision, + }, +}; +use sp_runtime::DispatchError; + +/// There are some accounts that have one consumer ref too few. These accounts are at risk of losing +/// their held (reserved) balance. They do not just lose it - it is also not accounted for in the +/// Total Issuance. Here we test the case that the account does not reap in such a case, but gets +/// one consumer ref for its reserved balance. +#[test] +fn regression_historic_acc_does_not_evaporate_reserve() { + ExtBuilder::default().build_and_execute_with(|| { + UseSystem::set(true); + let (alice, bob) = (0, 1); + // Alice is in a bad state with consumer == 0 && reserved > 0: + Balances::set_balance(&alice, 100); + TotalIssuance::::put(100); + ensure_ti_valid(); + + assert_ok!(Balances::hold(&TestId::Foo, &alice, 10)); + // This is the issue of the account: + System::dec_consumers(&alice); + + assert_eq!( + System::account(&alice), + AccountInfo { + data: AccountData { + free: 90, + reserved: 10, + frozen: 0, + flags: ExtraFlags(1u128 << 127), + }, + nonce: 0, + consumers: 0, // should be 1 on a good acc + providers: 1, + sufficients: 0, + } + ); + + ensure_ti_valid(); + + // Reaping the account is prevented by the new logic: + assert_noop!( + Balances::transfer_allow_death(Some(alice).into(), bob, 90), + DispatchError::ConsumerRemaining + ); + assert_noop!( + Balances::transfer_all(Some(alice).into(), bob, false), + DispatchError::ConsumerRemaining + ); + + // normal transfers still work: + hypothetically!({ + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + // Alice got back her consumer ref: + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + hypothetically!({ + assert_ok!(Balances::transfer_all(Some(alice).into(), bob, true)); + // Alice got back her consumer ref: + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + + // un-reserving all does not add a consumer ref: + hypothetically!({ + assert_ok!(Balances::release(&TestId::Foo, &alice, 10, Precision::Exact)); + assert_eq!(System::consumers(&alice), 0); + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + assert_eq!(System::consumers(&alice), 0); + ensure_ti_valid(); + }); + // un-reserving some does add a consumer ref: + hypothetically!({ + assert_ok!(Balances::release(&TestId::Foo, &alice, 5, Precision::Exact)); + assert_eq!(System::consumers(&alice), 1); + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + }); +} diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 234fe6eaf2c3..0abf2251290f 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet}; +use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet, TotalIssuance}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ assert_err, assert_noop, assert_ok, assert_storage_noop, derive_impl, @@ -47,6 +47,7 @@ mod currency_tests; mod dispatchable_tests; mod fungible_conformance_tests; mod 
fungible_tests; +mod general_tests; mod reentrancy_tests; type Block = frame_system::mocking::MockBlock; @@ -278,6 +279,23 @@ pub fn info_from_weight(w: Weight) -> DispatchInfo { DispatchInfo { weight: w, ..Default::default() } } +/// Check that the total-issuance matches the sum of all accounts' total balances. +pub fn ensure_ti_valid() { + let mut sum = 0; + + for acc in frame_system::Account::::iter_keys() { + if UseSystem::get() { + let data = frame_system::Pallet::::account(acc); + sum += data.data.total(); + } else { + let data = crate::Account::::get(acc); + sum += data.total(); + } + } + + assert_eq!(TotalIssuance::::get(), sum, "Total Issuance wrong"); +} + #[test] fn weights_sane() { let info = crate::Call::::transfer_allow_death { dest: 10, value: 4 }.get_dispatch_info(); diff --git a/substrate/frame/balances/src/types.rs b/substrate/frame/balances/src/types.rs index 69d33bb023f3..3e36a83575c8 100644 --- a/substrate/frame/balances/src/types.rs +++ b/substrate/frame/balances/src/types.rs @@ -111,7 +111,7 @@ pub struct AccountData { const IS_NEW_LOGIC: u128 = 0x80000000_00000000_00000000_00000000u128; #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct ExtraFlags(u128); +pub struct ExtraFlags(pub(crate) u128); impl Default for ExtraFlags { fn default() -> Self { Self(IS_NEW_LOGIC) From c66d8a84687f5d68c0192122aa513b4b340794ca Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 26 Apr 2024 12:24:42 +0300 Subject: [PATCH 11/27] Bump bridges relay version + uncomment bridges zombeinet tests (#4289) TODOs: - [x] wait and see if test `1` works; - [x] ~think of whether we need remaining tests.~ I think we should keep it - will try to revive and update it --- .gitlab/pipeline/zombienet.yml | 4 +--- .gitlab/pipeline/zombienet/bridges.yml | 4 ++-- ...hen-idle.js => multiple-headers-synced.js} | 22 +++++-------------- .../rococo-to-westend.zndsl | 20 +++++++++++++++++ .../run.sh | 2 +- .../westend-to-rococo.zndsl | 20 +++++++++++++++++ .../rococo-to-westend.zndsl | 8 ------- .../westend-to-rococo.zndsl | 7 ------ ...ridges_zombienet_tests_injected.Dockerfile | 2 +- 9 files changed, 51 insertions(+), 38 deletions(-) rename bridges/testing/framework/js-helpers/{only-mandatory-headers-synced-when-idle.js => multiple-headers-synced.js} (61%) create mode 100644 bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl rename bridges/testing/tests/{0002-mandatory-headers-synced-while-idle => 0002-free-headers-synced-while-idle}/run.sh (90%) create mode 100644 bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl delete mode 100644 bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl delete mode 100644 bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index e306cb43c027..52948e1eb719 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -12,6 +12,4 @@ include: # polkadot tests - .gitlab/pipeline/zombienet/polkadot.yml # bridges tests - # TODO: https://github.com/paritytech/parity-bridges-common/pull/2884 - # commenting until we have a new relatye, compatible with updated fees scheme - # - .gitlab/pipeline/zombienet/bridges.yml + - .gitlab/pipeline/zombienet/bridges.yml diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml index 4278f59b1e9a..9d7a8b931193 100644 --- 
a/.gitlab/pipeline/zombienet/bridges.yml +++ b/.gitlab/pipeline/zombienet/bridges.yml @@ -55,9 +55,9 @@ zombienet-bridges-0001-asset-transfer-works: - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0001-asset-transfer --docker - echo "Done" -zombienet-bridges-0002-mandatory-headers-synced-while-idle: +zombienet-bridges-0002-free-headers-synced-while-idle: extends: - .zombienet-bridges-common script: - - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-mandatory-headers-synced-while-idle --docker + - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-free-headers-synced-while-idle --docker - echo "Done" diff --git a/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/multiple-headers-synced.js similarity index 61% rename from bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js rename to bridges/testing/framework/js-helpers/multiple-headers-synced.js index 979179245ebe..a30efc821657 100644 --- a/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js +++ b/bridges/testing/framework/js-helpers/multiple-headers-synced.js @@ -10,33 +10,23 @@ async function run(nodeName, networkInfo, args) { // start listening to new blocks let totalGrandpaHeaders = 0; - let initialParachainHeaderImported = false; + let totalParachainHeaders = 0; api.rpc.chain.subscribeNewHeads(async function (header) { const apiAtParent = await api.at(header.parentHash); const apiAtCurrent = await api.at(header.hash); const currentEvents = await apiAtCurrent.query.system.events(); - totalGrandpaHeaders += await utils.ensureOnlyMandatoryGrandpaHeadersImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); - initialParachainHeaderImported = await utils.ensureOnlyInitialParachainHeaderImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); + totalGrandpaHeaders += await utils.countGrandpaHeaderImports(bridgedChain, currentEvents); + totalParachainHeaders += await utils.countParachainHeaderImports(bridgedChain, currentEvents); }); // wait given time await new Promise(resolve => setTimeout(resolve, exitAfterSeconds * 1000)); - // if we haven't seen any new GRANDPA or parachain headers => fail - if (totalGrandpaHeaders == 0) { + // if we haven't seen many (>1) new GRANDPA or parachain headers => fail + if (totalGrandpaHeaders <= 1) { throw new Error("No bridged relay chain headers imported"); } - if (!initialParachainHeaderImported) { + if (totalParachainHeaders <= 1) { throw new Error("No bridged parachain headers imported"); } } diff --git a/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl new file mode 100644 index 000000000000..0f779caa87cd --- /dev/null +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl @@ -0,0 +1,20 @@ +Description: While relayer is idle, we only sync free Rococo (and a single Rococo BH) headers to Westend BH. 
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml +Creds: config + +# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever +# change, it'd need to be fixed here as well + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds + +# ensure that we have synced multiple relay and parachain headers while idle. This includes both +# headers that were generated while relay was offline and those in the next 100 seconds while script is active. +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/multiple-headers-synced.js with "300,rococo-at-westend" within 600 seconds + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh similarity index 90% rename from bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh rename to bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh index 32419dc84f59..9d19a9688f94 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh @@ -22,7 +22,7 @@ echo # which is expected to be 60 seconds for the test environment. echo -e "Sleeping 90s before starting relayer ...\n" sleep 90 -${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid +${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir finality_relayer_pid parachains_relayer_pid messages_relayer_pid run_zndsl ${BASH_SOURCE%/*}/rococo-to-westend.zndsl $westend_dir run_zndsl ${BASH_SOURCE%/*}/westend-to-rococo.zndsl $rococo_dir diff --git a/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl new file mode 100644 index 000000000000..7a6f1ec379d2 --- /dev/null +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl @@ -0,0 +1,20 @@ +Description: While relayer is idle, we only sync free Westend (and a single Westend BH) headers to Rococo BH. 
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml +Creds: config + +# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever +# change, it'd need to be fixed here as well + +# //Charlie has inital balance +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave has inital balance +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds + +# ensure that we have synced multiple relay and parachain headers while idle. This includes both +# headers that were generated while relay was offline and those in the next 100 seconds while script is active. +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/multiple-headers-synced.js with "300,westend-at-rococo" within 600 seconds + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl deleted file mode 100644 index 6e381f537732..000000000000 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl +++ /dev/null @@ -1,8 +0,0 @@ -Description: While relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH. -Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml -Creds: config - -# ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were -# generated while relay was offline and those in the next 100 seconds while script is active. -bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds - diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl deleted file mode 100644 index b4b3e4367916..000000000000 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl +++ /dev/null @@ -1,7 +0,0 @@ -Description: While relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH. -Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml -Creds: config - -# ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were -# generated while relay was offline and those in the next 100 seconds while script is active. 
-bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile index 938f5cc45a11..196ba861f503 100644 --- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile +++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile @@ -1,7 +1,7 @@ # this image is built on top of existing Zombienet image ARG ZOMBIENET_IMAGE # this image uses substrate-relay image built elsewhere -ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.2.1 +ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.5.0 # metadata ARG VCS_REF From d212fc7a41fc72299913737c5fea2f3fcfe0a253 Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Fri, 26 Apr 2024 13:24:03 +0200 Subject: [PATCH 12/27] review-bot: reverted #4271 and added `workflow_dispatch` (#4293) This PR includes two changes: - added `workflow_dispatch` to review bot - reverted #4271 ### Added `workflow_dispatch` to review bot This allows us, in the case that review-bot fails for some fork reasons, to trigger it manually ensuring that we can overcame the problem with the multiple actions while we look for a solution. image ### Reverted #4271 Unfortunately, the changes added in #4271 do not work in forks. Here is a lengthy discussion of many individuals facing the same problem as me: - [GitHub Action `pull_request` attribute empty in `workflow_run` event object for PR from forked repo #25220](https://github.com/orgs/community/discussions/25220) So I had to revert it (but I updated the dependencies to latest). #### Miscellaneous changes I added a debug log at the end of review bot in case it fails so we can easily debug it without having to make a lot of boilerplate and forks to duplicate the environment. 
--- .github/workflows/review-bot.yml | 19 ++++++++++++++++++- .github/workflows/review-trigger.yml | 13 +++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml index fb877357b232..f1401406ae47 100644 --- a/.github/workflows/review-bot.yml +++ b/.github/workflows/review-bot.yml @@ -5,6 +5,12 @@ on: - Review-Trigger types: - completed + workflow_dispatch: + inputs: + pr-number: + description: "Number of the PR to evaluate" + required: true + type: number jobs: review-approvals: @@ -17,6 +23,12 @@ jobs: with: app-id: ${{ secrets.REVIEW_APP_ID }} private-key: ${{ secrets.REVIEW_APP_KEY }} + - name: Extract content of artifact + if: ${{ !inputs.pr-number }} + id: number + uses: Bullrich/extract-text-from-artifact@v1.0.1 + with: + artifact-name: pr_number - name: "Evaluates PR reviews and assigns reviewers" uses: paritytech/review-bot@v2.4.0 with: @@ -24,5 +36,10 @@ jobs: team-token: ${{ steps.app_token.outputs.token }} checks-token: ${{ steps.app_token.outputs.token }} # This is extracted from the triggering event - pr-number: ${{ github.event.workflow_run.pull_requests[0].number }} + pr-number: ${{ inputs.pr-number || steps.number.outputs.content }} request-reviewers: true + - name: Log payload + if: ${{ failure() || runner.debug }} + run: echo "::debug::$payload" + env: + payload: ${{ toJson(github.event) }} diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 6437be161d34..ec4a62afc0c7 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -58,3 +58,16 @@ jobs: env: GH_TOKEN: ${{ github.token }} COMMENTS: ${{ steps.comments.outputs.users }} + - name: Get PR number + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + echo "Saving PR number: $PR_NUMBER" + mkdir -p ./pr + echo $PR_NUMBER > ./pr/pr_number + - uses: actions/upload-artifact@v4 + name: Save PR number + with: + name: pr_number + path: pr/ + retention-days: 5 From 9a48cd707ed7f4034aadb8dc05065080ad102037 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 26 Apr 2024 14:26:05 +0300 Subject: [PATCH 13/27] Bridges: added helper function to relay single GRANDPA proof + header (#4307) related to https://github.com/paritytech/parity-bridges-common/issues/2962 silent, because the actual code for subcommand is added in the `parity-bridges-common` repo, where binary lives --------- Co-authored-by: Adrian Catangiu --- .../src/cli/relay_headers.rs | 39 ++++++++++++++++++- .../lib-substrate-relay/src/finality/mod.rs | 37 +++++++++++++++++- 2 files changed, 73 insertions(+), 3 deletions(-) diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index cf1957c7323b..093f98ef21ed 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -19,7 +19,10 @@ use async_trait::async_trait; use structopt::StructOpt; -use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; +use relay_utils::{ + metrics::{GlobalMetrics, StandaloneMetric}, + UniqueSaturatedInto, +}; use crate::{ cli::{bridge::*, chain_schema::*, PrometheusParams}, @@ -48,6 +51,21 @@ pub struct RelayHeadersParams { prometheus_params: PrometheusParams, } +/// Single header relaying params. 
+#[derive(StructOpt)] +pub struct RelayHeaderParams { + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + /// Number of the source chain header that we want to relay. It must have a persistent + /// storage proof at the [`Self::source`] node, otherwise the command will fail. + #[structopt(long)] + number: u128, +} + impl RelayHeadersParams { fn headers_to_relay(&self) -> HeadersToRelay { match (self.only_mandatory_headers, self.only_free_headers) { @@ -89,4 +107,23 @@ pub trait HeadersRelayer: RelayToRelayHeadersCliBridge { ) .await } + + /// Relay single header. No checks are made to ensure that transaction will succeed. + async fn relay_header(data: RelayHeaderParams) -> anyhow::Result<()> { + let source_client = data.source.into_client::().await?; + let target_client = data.target.into_client::().await?; + let target_transactions_mortality = data.target_sign.target_transactions_mortality; + let target_sign = data.target_sign.to_keypair::()?; + + crate::finality::relay_single_header::( + source_client, + target_client, + crate::TransactionParams { + signer: target_sign, + mortality: target_transactions_mortality, + }, + data.number.unique_saturated_into(), + ) + .await + } } diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index a06857ae1d9b..0293e1da224a 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -25,13 +25,15 @@ use crate::{ use async_trait::async_trait; use bp_header_chain::justification::{GrandpaJustification, JustificationVerificationContext}; -use finality_relay::{FinalityPipeline, FinalitySyncPipeline, HeadersToRelay}; +use finality_relay::{ + FinalityPipeline, FinalitySyncPipeline, HeadersToRelay, SourceClient, TargetClient, +}; use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; use relay_substrate_client::{ transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, ChainWithTransactions, Client, HashOf, HeaderOf, SyncHeader, }; -use relay_utils::metrics::MetricsParams; +use relay_utils::{metrics::MetricsParams, TrackedTransactionStatus, TransactionTracker}; use sp_core::Pair; use std::{fmt::Debug, marker::PhantomData}; @@ -274,3 +276,34 @@ pub async fn run( .await .map_err(|e| anyhow::format_err!("{}", e)) } + +/// Relay single header. No checks are made to ensure that transaction will succeed. +pub async fn relay_single_header( + source_client: Client, + target_client: Client, + transaction_params: TransactionParams>, + header_number: BlockNumberOf, +) -> anyhow::Result<()> { + let finality_source = SubstrateFinalitySource::
<P>
::new(source_client, None); + let (header, proof) = finality_source.header_and_finality_proof(header_number).await?; + let Some(proof) = proof else { + return Err(anyhow::format_err!( + "Unable to submit {} header #{} to {}: no finality proof", + P::SourceChain::NAME, + header_number, + P::TargetChain::NAME, + )); + }; + + let finality_target = SubstrateFinalityTarget::
<P>
::new(target_client, transaction_params); + let tx_tracker = finality_target.submit_finality_proof(header, proof, false).await?; + match tx_tracker.wait().await { + TrackedTransactionStatus::Finalized(_) => Ok(()), + TrackedTransactionStatus::Lost => Err(anyhow::format_err!( + "Transaction with {} header #{} is considered lost at {}", + P::SourceChain::NAME, + header_number, + P::TargetChain::NAME, + )), + } +} From 97f74253387ee43e30c25fd970b5ae4cc1a722d7 Mon Sep 17 00:00:00 2001 From: gui Date: Fri, 26 Apr 2024 21:27:14 +0900 Subject: [PATCH 14/27] Try state: log errors instead of loggin the number of error and discarding them (#4265) Currently we discard errors content We should at least log it. Code now is more similar to what is written in try_on_runtime_upgrade. label should be R0 --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Liam Aharon Co-authored-by: Javier Bullrich --- .../support/src/traits/try_runtime/mod.rs | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/substrate/frame/support/src/traits/try_runtime/mod.rs b/substrate/frame/support/src/traits/try_runtime/mod.rs index bec2dbf549a1..c1bf1feb19e5 100644 --- a/substrate/frame/support/src/traits/try_runtime/mod.rs +++ b/substrate/frame/support/src/traits/try_runtime/mod.rs @@ -161,22 +161,31 @@ impl TryState Ok(()), Select::All => { - let mut error_count = 0; + let mut errors = Vec::::new(); + for_tuples!(#( - if let Err(_) = Tuple::try_state(n.clone(), targets.clone()) { - error_count += 1; + if let Err(err) = Tuple::try_state(n.clone(), targets.clone()) { + errors.push(err); } )*); - if error_count > 0 { + if !errors.is_empty() { log::error!( target: "try-runtime", - "{} pallets exited with errors while executing try_state checks.", - error_count + "Detected errors while executing `try_state`:", ); + errors.iter().for_each(|err| { + log::error!( + target: "try-runtime", + "{:?}", + err + ); + }); + return Err( - "Detected errors while executing try_state checks. See logs for more info." + "Detected errors while executing `try_state` checks. See logs for more \ + info." .into(), ) } From 988e30f102b155ab68d664d62ac5c73da171659a Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Fri, 26 Apr 2024 16:28:08 +0300 Subject: [PATCH 15/27] Implementation of the new validator disabling strategy (#2226) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/paritytech/polkadot-sdk/issues/1966, https://github.com/paritytech/polkadot-sdk/issues/1963 and https://github.com/paritytech/polkadot-sdk/issues/1962. Disabling strategy specification [here](https://github.com/paritytech/polkadot-sdk/pull/2955). (Updated 13/02/2024) Implements: * validator disabling for a whole era instead of just a session * no more than 1/3 of the validators in the active set are disabled Removes: * `DisableStrategy` enum - now each validator committing an offence is disabled. * New era is not forced if too many validators are disabled. Before this PR not all offenders were disabled. A decision was made based on [`enum DisableStrategy`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/primitives/staking/src/offence.rs#L54). Some offenders were disabled for a whole era, some just for a session, some were not disabled at all. This PR changes the disabling behaviour. Now a validator committing an offense is disabled immediately till the end of the current era. 
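To make the new limit concrete, here is a minimal, self-contained sketch of the arithmetic behind `UpToLimitDisablingStrategy` (the free function and constant below are illustrative stand-ins for the pallet's generic parameter and method defined later in this patch, not the pallet code itself): with the default factor of 3, at most `(n - 1) / 3` validators of an active set of size `n` can be disabled within an era.

```rust
/// Illustrative constant standing in for the pallet's `DISABLING_LIMIT_FACTOR`
/// generic parameter (3 = byzantine threshold).
const DISABLING_LIMIT_FACTOR: usize = 3;

/// Mirrors the limit computed by `UpToLimitDisablingStrategy::disable_limit`:
/// never disable more than floor((active_set_len - 1) / factor) validators.
fn disable_limit(validators_len: usize) -> usize {
    validators_len.saturating_sub(1) / DISABLING_LIMIT_FACTOR
}

fn main() {
    assert_eq!(disable_limit(7), 2); // a 7-validator set tolerates 2 disabled validators
    assert_eq!(disable_limit(4), 1);
    assert_eq!(disable_limit(1), 0); // a single validator is never disabled
    println!("disable_limit examples hold");
}
```

With a 7-validator active set (as configured in the reworked `mass_slash_doesnt_enter_emergency_phase` e2e test), slashing half of the set still leaves at most 2 validators disabled and no new era is forced.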
Some implementation notes: * `OffendingValidators` in pallet session keeps all offenders (this is not changed). However its type is changed from `Vec<(u32, bool)>` to `Vec`. The reason is simple - each offender is getting disabled so the bool doesn't make sense anymore. * When a validator is disabled it is first added to `OffendingValidators` and then to `DisabledValidators`. This is done in [`add_offending_validator`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/slashing.rs#L325) from staking pallet. * In [`rotate_session`](https://github.com/paritytech/polkadot-sdk/blob/bdbe98297032e21a553bf191c530690b1d591405/substrate/frame/session/src/lib.rs#L623) the `end_session` also calls [`end_era`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/pallet/impls.rs#L490) when an era ends. In this case `OffendingValidators` are cleared **(1)**. * Then in [`rotate_session`](https://github.com/paritytech/polkadot-sdk/blob/bdbe98297032e21a553bf191c530690b1d591405/substrate/frame/session/src/lib.rs#L623) `DisabledValidators` are cleared **(2)** * And finally (still in `rotate_session`) a call to [`start_session`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/pallet/impls.rs#L430) repopulates the disabled validators **(3)**. * The reason for this complication is that session pallet knows nothing abut eras. To overcome this on each new session the disabled list is repopulated (points 2 and 3). Staking pallet knows when a new era starts so with point 1 it ensures that the offenders list is cleared. --------- Co-authored-by: ordian Co-authored-by: ordian Co-authored-by: Maciej Co-authored-by: Gonçalo Pestana Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: command-bot <> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> --- .../parachains/src/disputes/slashing.rs | 11 +- polkadot/runtime/test-runtime/src/lib.rs | 3 +- polkadot/runtime/westend/src/lib.rs | 5 +- .../functional/0010-validator-disabling.toml | 2 +- prdoc/pr_2226.prdoc | 28 + substrate/bin/node/runtime/src/lib.rs | 3 +- substrate/frame/babe/src/mock.rs | 3 +- substrate/frame/beefy/src/mock.rs | 3 +- .../test-staking-e2e/src/lib.rs | 162 +--- .../test-staking-e2e/src/mock.rs | 27 +- substrate/frame/fast-unstake/src/mock.rs | 2 +- substrate/frame/grandpa/src/mock.rs | 3 +- substrate/frame/im-online/src/lib.rs | 6 +- substrate/frame/im-online/src/tests.rs | 3 - .../nomination-pools/benchmarking/src/mock.rs | 2 +- .../nomination-pools/test-staking/src/mock.rs | 2 +- .../frame/offences/benchmarking/src/mock.rs | 2 +- substrate/frame/offences/src/lib.rs | 1 - substrate/frame/offences/src/migration.rs | 9 +- substrate/frame/offences/src/mock.rs | 3 +- substrate/frame/root-offences/src/lib.rs | 4 +- substrate/frame/root-offences/src/mock.rs | 3 +- .../frame/session/benchmarking/src/mock.rs | 2 +- substrate/frame/session/src/lib.rs | 2 +- substrate/frame/staking/CHANGELOG.md | 19 + substrate/frame/staking/src/lib.rs | 76 ++ substrate/frame/staking/src/migrations.rs | 57 +- substrate/frame/staking/src/mock.rs | 22 +- substrate/frame/staking/src/pallet/impls.rs | 33 +- substrate/frame/staking/src/pallet/mod.rs | 35 +- substrate/frame/staking/src/slashing.rs | 80 +- substrate/frame/staking/src/tests.rs | 834 ++++++++++-------- substrate/primitives/staking/src/offence.rs | 32 - 33 files changed, 
777 insertions(+), 702 deletions(-) create mode 100644 prdoc/pr_2226.prdoc diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index d0c74e4bc958..a61d0c899836 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -64,7 +64,7 @@ use sp_runtime::{ KeyTypeId, Perbill, }; use sp_session::{GetSessionNumber, GetValidatorCount}; -use sp_staking::offence::{DisableStrategy, Kind, Offence, OffenceError, ReportOffence}; +use sp_staking::offence::{Kind, Offence, OffenceError, ReportOffence}; use sp_std::{ collections::{btree_map::Entry, btree_set::BTreeSet}, prelude::*, @@ -134,15 +134,6 @@ where self.time_slot.clone() } - fn disable_strategy(&self) -> DisableStrategy { - match self.kind { - SlashingOffenceKind::ForInvalid => DisableStrategy::Always, - // in the future we might change it based on number of disputes initiated: - // - SlashingOffenceKind::AgainstValid => DisableStrategy::Never, - } - } - fn slash_fraction(&self, _offenders: u32) -> Perbill { self.slash_fraction } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 514643c0a201..d0f1ff0035fc 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -313,7 +313,6 @@ parameter_types! { pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxExposurePageSize: u32 = 64; pub const MaxNominators: u32 = 256; - pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; pub const OnChainMaxWinners: u32 = u32::MAX; // Unbounded number of election targets and voters. @@ -349,7 +348,6 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = onchain::OnChainExecution; @@ -364,6 +362,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7924939c79bd..03ecd5c070b2 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -613,7 +613,6 @@ parameter_types! { // this is an unbounded number. We just set it to a reasonably high value, 1 full page // of nominators. 
pub const MaxNominators: u32 = 64; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxNominations: u32 = ::LIMIT as u32; pub const MaxControllersInDeprecationBatch: u32 = 751; } @@ -634,7 +633,6 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; @@ -647,6 +645,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = NominationPools; type WeightInfo = weights::pallet_staking::WeightInfo; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { @@ -1649,7 +1648,7 @@ pub mod migrations { } /// Unreleased migrations. Add new ones here: - pub type Unreleased = (); + pub type Unreleased = (pallet_staking::migrations::v15::MigrateV14ToV15,); } /// Unchecked extrinsic type as expected by this runtime. diff --git a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml index c9d79c5f8f23..806f34d7f767 100644 --- a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml +++ b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml @@ -21,7 +21,7 @@ requests = { memory = "2G", cpu = "1" } [[relaychain.node_groups]] name = "honest-validator" count = 3 - args = ["-lparachain=debug"] + args = ["-lparachain=debug,runtime::staking=debug"] [[relaychain.node_groups]] image = "{{MALUS_IMAGE}}" diff --git a/prdoc/pr_2226.prdoc b/prdoc/pr_2226.prdoc new file mode 100644 index 000000000000..f03540a50f6c --- /dev/null +++ b/prdoc/pr_2226.prdoc @@ -0,0 +1,28 @@ +title: Validator disabling strategy in runtime + +doc: + - audience: Node Operator + description: | + On each committed offence (no matter slashable or not) the offending validator will be + disabled for a whole era. + - audience: Runtime Dev + description: | + The disabling strategy in staking pallet is no longer hardcoded but abstracted away via + `DisablingStrategy` trait. The trait contains a single function (make_disabling_decision) which + is called for each offence. The function makes a decision if (and which) validators should be + disabled. A default implementation is provided - `UpToLimitDisablingStrategy`. It + will be used on Kusama and Polkadot. In nutshell `UpToLimitDisablingStrategy` + disables offenders up to the configured threshold. Offending validators are not disabled for + offences in previous eras. The threshold is controlled via `DISABLING_LIMIT_FACTOR` (a generic + parameter of `UpToLimitDisablingStrategy`). + +migrations: + db: [] + runtime: + - reference: pallet-staking + description: | + Renames `OffendingValidators` storage item to `DisabledValidators` and changes its type from + `Vec<(u32, bool)>` to `Vec`. + +crates: + - name: pallet-staking \ No newline at end of file diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 43c617023bcb..0caaa8c73226 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -654,7 +654,6 @@ parameter_types! { pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. 
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominators: u32 = 64; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxControllersInDeprecationBatch: u32 = 5900; pub OffchainRepeat: BlockNumber = 5; pub HistoryDepth: u32 = 84; @@ -690,7 +689,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<256>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; @@ -703,6 +701,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index ec54275278eb..395a86e65288 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -144,7 +144,6 @@ parameter_types! { pub const BondingDuration: EraIndex = 3; pub const SlashDeferDuration: EraIndex = 0; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(16); pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -174,7 +173,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -187,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 1c55adc8de4b..0b87de6bf5d7 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -158,7 +158,6 @@ parameter_types! 
{ pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -188,7 +187,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -201,6 +199,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 83083c912094..c00bb66ea130 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -23,7 +23,6 @@ pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; use frame_support::{assert_err, assert_noop, assert_ok}; use mock::*; use sp_core::Get; -use sp_npos_elections::{to_supports, StakedAssignment}; use sp_runtime::Perbill; use crate::mock::RuntimeOrigin; @@ -127,75 +126,48 @@ fn offchainify_works() { } #[test] -/// Replicates the Kusama incident of 8th Dec 2022 and its resolution through the governance +/// Inspired by the Kusama incident of 8th Dec 2022 and its resolution through the governance /// fallback. /// -/// After enough slashes exceeded the `Staking::OffendingValidatorsThreshold`, the staking pallet -/// set `Forcing::ForceNew`. When a new session starts, staking will start to force a new era and -/// calls ::elect(). If at this point EPM and the staking miners did not -/// have enough time to queue a new solution (snapshot + solution submission), the election request -/// fails. If there is no election fallback mechanism in place, EPM enters in emergency mode. -/// Recovery: Once EPM is in emergency mode, subsequent calls to `elect()` will fail until a new -/// solution is added to EPM's `QueuedSolution` queue. This can be achieved through -/// `Call::set_emergency_election_result` or `Call::governance_fallback` dispatchables. Once a new -/// solution is added to the queue, EPM phase transitions to `Phase::Off` and the election flow -/// restarts. Note that in this test case, the emergency throttling is disabled. -fn enters_emergency_phase_after_forcing_before_elect() { +/// Mass slash of validators shouldn't disable more than 1/3 of them (the byzantine threshold). Also +/// no new era should be forced which could lead to EPM entering emergency mode. 
+fn mass_slash_doesnt_enter_emergency_phase() { let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); - let (ext, pool_state, _) = ExtBuilder::default().epm(epm_builder).build_offchainify(); - - execute_with(ext, || { - log!( - trace, - "current validators (staking): {:?}", - >::validators() - ); - let session_validators_before = Session::validators(); - - roll_to_epm_off(); - assert!(ElectionProviderMultiPhase::current_phase().is_off()); + let staking_builder = StakingExtBuilder::default().validator_count(7); + let (mut ext, _, _) = ExtBuilder::default() + .epm(epm_builder) + .staking(staking_builder) + .build_offchainify(); + ext.execute_with(|| { assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); - // slashes so that staking goes into `Forcing::ForceNew`. - slash_through_offending_threshold(); - assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::ForceNew); + let active_set_size_before_slash = Session::validators().len(); - advance_session_delayed_solution(pool_state.clone()); - assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); - log_current_time(); + // Slash more than 1/3 of the active validators + let mut slashed = slash_half_the_active_set(); - let era_before_delayed_next = Staking::current_era(); - // try to advance 2 eras. - assert!(start_next_active_era_delayed_solution(pool_state.clone()).is_ok()); - assert_eq!(Staking::current_era(), era_before_delayed_next); - assert!(start_next_active_era(pool_state).is_err()); - assert_eq!(Staking::current_era(), era_before_delayed_next); + let active_set_size_after_slash = Session::validators().len(); - // EPM is still in emergency phase. - assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + // active set should stay the same before and after the slash + assert_eq!(active_set_size_before_slash, active_set_size_after_slash); - // session validator set remains the same. - assert_eq!(Session::validators(), session_validators_before); - - // performs recovery through the set emergency result. - let supports = to_supports(&vec![ - StakedAssignment { who: 21, distribution: vec![(21, 10)] }, - StakedAssignment { who: 31, distribution: vec![(21, 10), (31, 10)] }, - StakedAssignment { who: 41, distribution: vec![(41, 10)] }, - ]); - assert!(ElectionProviderMultiPhase::set_emergency_election_result( - RuntimeOrigin::root(), - supports - ) - .is_ok()); + // Slashed validators are disabled up to a limit + slashed.truncate( + pallet_staking::UpToLimitDisablingStrategy::::disable_limit( + active_set_size_after_slash, + ), + ); - // EPM can now roll to signed phase to proceed with elections. The validator set is the - // expected (ie. set through `set_emergency_election_result`). - roll_to_epm_signed(); - //assert!(ElectionProviderMultiPhase::current_phase().is_signed()); - assert_eq!(Session::validators(), vec![21, 31, 41]); - assert_eq!(Staking::current_era(), era_before_delayed_next.map(|e| e + 1)); + // Find the indices of the disabled validators + let active_set = Session::validators(); + let expected_disabled = slashed + .into_iter() + .map(|d| active_set.iter().position(|a| *a == d).unwrap() as u32) + .collect::>(); + + assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); + assert_eq!(Session::disabled_validators(), expected_disabled); }); } @@ -253,77 +225,7 @@ fn continuous_slashes_below_offending_threshold() { } #[test] -/// Slashed validator sets intentions in the same era of slashing. 
-/// -/// When validators are slashed, they are chilled and removed from the current `VoterList`. Thus, -/// the slashed validator should not be considered in the next validator set. However, if the -/// slashed validator sets its intention to validate again in the same era when it was slashed and -/// chilled, the validator may not be removed from the active validator set across eras, provided -/// it would selected in the subsequent era if there was no slash. Nominators of the slashed -/// validator will also be slashed and chilled, as expected, but the nomination intentions will -/// remain after the validator re-set the intention to be validating again. -/// -/// This behaviour is due to removing implicit chill upon slash -/// . -/// -/// Related to . -fn set_validation_intention_after_chilled() { - use frame_election_provider_support::SortedListProvider; - use pallet_staking::{Event, Forcing, Nominators}; - - let (ext, pool_state, _) = ExtBuilder::default() - .epm(EpmExtBuilder::default()) - .staking(StakingExtBuilder::default()) - .build_offchainify(); - - execute_with(ext, || { - assert_eq!(active_era(), 0); - // validator is part of the validator set. - assert!(Session::validators().contains(&41)); - assert!(::VoterList::contains(&41)); - - // nominate validator 81. - assert_ok!(Staking::nominate(RuntimeOrigin::signed(21), vec![41])); - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - - // validator is slashed. it is removed from the `VoterList` through chilling but in the - // current era, the validator is still part of the active validator set. - add_slash(&41); - assert!(Session::validators().contains(&41)); - assert!(!::VoterList::contains(&41)); - assert_eq!( - staking_events(), - [ - Event::Chilled { stash: 41 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 41, - slash_era: 0, - fraction: Perbill::from_percent(10) - } - ], - ); - - // after the nominator is slashed and chilled, the nominations remain. - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - - // validator sets intention to stake again in the same era it was chilled. - assert_ok!(Staking::validate(RuntimeOrigin::signed(41), Default::default())); - - // progress era and check that the slashed validator is still part of the validator - // set. - assert!(start_next_active_era(pool_state).is_ok()); - assert_eq!(active_era(), 1); - assert!(Session::validators().contains(&41)); - assert!(::VoterList::contains(&41)); - - // nominations are still active as before the slash. - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - }) -} - -#[test] -/// Active ledger balance may fall below ED if account chills before unbonding. +/// Active ledger balance may fall below ED if account chills before unbounding. /// /// Unbonding call fails if the remaining ledger's stash balance falls below the existential /// deposit. 
However, if the stash is chilled before unbonding, the ledger's active balance may diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index a727e3bf8162..8f1775a7e595 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -35,7 +35,7 @@ use sp_runtime::{ transaction_validity, BuildStorage, PerU16, Perbill, Percent, }; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, EraIndex, SessionIndex, }; use sp_std::prelude::*; @@ -236,7 +236,6 @@ parameter_types! { pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub static BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(40); pub HistoryDepth: u32 = 84; } @@ -290,6 +289,8 @@ parameter_types! { /// Upper limit on the number of NPOS nominations. const MAX_QUOTA_NOMINATIONS: u32 = 16; +/// Disabling factor set explicitly to byzantine threshold +pub(crate) const SLASHING_DISABLING_FACTOR: usize = 3; impl pallet_staking::Config for Runtime { type Currency = Balances; @@ -308,7 +309,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = (); type NextNewSession = Session; type MaxExposurePageSize = ConstU32<256>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = BagsList; @@ -320,6 +320,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl frame_system::offchain::SendTransactionTypes for Runtime @@ -871,7 +872,6 @@ pub(crate) fn on_offence_now( offenders, slash_fraction, Staking::eras_start_session_index(now).unwrap(), - DisableStrategy::WhenSlashed, ); } @@ -886,19 +886,16 @@ pub(crate) fn add_slash(who: &AccountId) { ); } -// Slashes enough validators to cross the `Staking::OffendingValidatorsThreshold`. -pub(crate) fn slash_through_offending_threshold() { - let validators = Session::validators(); - let mut remaining_slashes = - ::OffendingValidatorsThreshold::get() * - validators.len() as u32; +// Slashes 1/2 of the active set. Returns the `AccountId`s of the slashed validators. 
+pub(crate) fn slash_half_the_active_set() -> Vec { + let mut slashed = Session::validators(); + slashed.truncate(slashed.len() / 2); - for v in validators.into_iter() { - if remaining_slashes != 0 { - add_slash(&v); - remaining_slashes -= 1; - } + for v in slashed.iter() { + add_slash(v); } + + slashed } // Slashes a percentage of the active nominators that haven't been slashed yet, with diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index b731cb822f33..d876f9f6171e 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -134,7 +134,6 @@ impl pallet_staking::Config for Runtime { type NextNewSession = (); type HistoryDepth = ConstU32<84>; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; @@ -145,6 +144,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } pub struct BalanceToU256; diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 4a21da655e5b..2d54f525b1f0 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -146,7 +146,6 @@ parameter_types! { pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -176,7 +175,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -189,6 +187,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 239b47834d1f..f91a473e53d5 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -104,7 +104,7 @@ use sp_runtime::{ PerThing, Perbill, Permill, RuntimeDebug, SaturatedConversion, }; use sp_staking::{ - offence::{DisableStrategy, Kind, Offence, ReportOffence}, + offence::{Kind, Offence, ReportOffence}, SessionIndex, }; use sp_std::prelude::*; @@ -847,10 +847,6 @@ impl Offence for UnresponsivenessOffence { self.session_index } - fn disable_strategy(&self) -> DisableStrategy { - DisableStrategy::Never - } - fn slash_fraction(&self, offenders: u32) -> Perbill { // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index f9959593494a..12333d59ef89 100644 --- 
a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -50,9 +50,6 @@ fn test_unresponsiveness_slash_fraction() { dummy_offence.slash_fraction(17), Perbill::from_parts(46200000), // 4.62% ); - - // Offline offences should never lead to being disabled. - assert_eq!(dummy_offence.disable_strategy(), DisableStrategy::Never); } #[test] diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index a59f8f3f40e7..2752d53a6b9f 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -111,7 +111,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; @@ -124,6 +123,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-staking/src/mock.rs index 2ec47e0d1645..93a05ddfae99 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-staking/src/mock.rs @@ -125,7 +125,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; @@ -138,6 +137,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 27129e73c71e..eeaa1364504a 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -174,7 +174,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; @@ -186,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_im_online::Config for Test { diff --git a/substrate/frame/offences/src/lib.rs b/substrate/frame/offences/src/lib.rs index 1c7ffeca7198..a328b2fee4e2 100644 --- a/substrate/frame/offences/src/lib.rs +++ b/substrate/frame/offences/src/lib.rs @@ -132,7 +132,6 @@ where &concurrent_offenders, &slash_perbill, offence.session_index(), - offence.disable_strategy(), ); // Deposit the event. diff --git a/substrate/frame/offences/src/migration.rs b/substrate/frame/offences/src/migration.rs index 3b5cf3ce9269..199f47491369 100644 --- a/substrate/frame/offences/src/migration.rs +++ b/substrate/frame/offences/src/migration.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, Twox64Concat, }; -use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; +use sp_staking::offence::OnOffenceHandler; use sp_std::vec::Vec; #[cfg(feature = "try-runtime")] @@ -106,12 +106,7 @@ pub fn remove_deferred_storage() -> Weight { let deferred = >::take(); log::info!(target: LOG_TARGET, "have {} deferred offences, applying.", deferred.len()); for (offences, perbill, session) in deferred.iter() { - let consumed = T::OnOffenceHandler::on_offence( - offences, - perbill, - *session, - DisableStrategy::WhenSlashed, - ); + let consumed = T::OnOffenceHandler::on_offence(offences, perbill, *session); weight = weight.saturating_add(consumed); } diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 31d5f805f3e4..9a3120e41eaa 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -33,7 +33,7 @@ use sp_runtime::{ BuildStorage, Perbill, }; use sp_staking::{ - offence::{self, DisableStrategy, Kind, OffenceDetails}, + offence::{self, Kind, OffenceDetails}, SessionIndex, }; @@ -51,7 +51,6 @@ impl offence::OnOffenceHandler _offenders: &[OffenceDetails], slash_fraction: &[Perbill], _offence_session: SessionIndex, - _disable_strategy: DisableStrategy, ) -> Weight { OnOffencePerbill::mutate(|f| { *f = slash_fraction.to_vec(); diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index 24d259ed1d4a..6531080b8d10 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -33,7 +33,7 @@ use alloc::vec::Vec; use pallet_session::historical::IdentificationTuple; use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; use sp_runtime::Perbill; -use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; +use sp_staking::offence::OnOffenceHandler; pub use pallet::*; @@ -128,7 +128,7 @@ pub mod pallet { T::AccountId, IdentificationTuple, Weight, - 
>>::on_offence(&offenders, &slash_fraction, session_index, DisableStrategy::WhenSlashed); + >>::on_offence(&offenders, &slash_fraction, session_index); } } } diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 626db138c2bf..7e7332c3f7e3 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -133,7 +133,6 @@ parameter_types! { pub static SlashDeferDuration: EraIndex = 0; pub const BondingDuration: EraIndex = 3; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } impl pallet_staking::Config for Test { @@ -153,7 +152,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; @@ -165,6 +163,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_session::historical::Config for Test { diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 81052141fd86..6cefa8f39a8c 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -174,7 +174,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; @@ -186,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl crate::Config for Test {} diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 17b6aa7a4640..9506e98adf7d 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -627,7 +627,7 @@ impl Pallet { Validators::::put(&validators); if changed { - // reset disabled validators + // reset disabled validators if active set was changed >::take(); } diff --git a/substrate/frame/staking/CHANGELOG.md b/substrate/frame/staking/CHANGELOG.md index 719aa388755f..113b7a6200b6 100644 --- a/substrate/frame/staking/CHANGELOG.md +++ b/substrate/frame/staking/CHANGELOG.md @@ -7,6 +7,25 @@ on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). We maintain a single integer version number for staking pallet to keep track of all storage migrations. +## [v15] + +### Added + +- New trait `DisablingStrategy` which is responsible for making a decision which offenders should be + disabled on new offence. +- Default implementation of `DisablingStrategy` - `UpToLimitDisablingStrategy`. It + disables each new offender up to a threshold (1/3 by default). Offenders are not runtime disabled for + offences in previous era(s). 
But they will be low-priority node-side disabled for dispute initiation. +- `OffendingValidators` storage item is replaced with `DisabledValidators`. The former keeps all + offenders and if they are disabled or not. The latter just keeps a list of all offenders as they + are disabled by default. + +### Deprecated + +- `enum DisableStrategy` is no longer needed because disabling is not related to the type of the + offence anymore. A decision if a offender is disabled or not is made by a `DisablingStrategy` + implementation. + ## [v14] ### Added diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index f5b7e3eca3de..047ad6b87cc1 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -1239,3 +1239,79 @@ impl BenchmarkingConfig for TestBenchmarkingConfig { type MaxValidators = frame_support::traits::ConstU32<100>; type MaxNominators = frame_support::traits::ConstU32<100>; } + +/// Controls validator disabling +pub trait DisablingStrategy { + /// Make a disabling decision. Returns the index of the validator to disable or `None` if no new + /// validator should be disabled. + fn decision( + offender_stash: &T::AccountId, + slash_era: EraIndex, + currently_disabled: &Vec, + ) -> Option; +} + +/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a +/// threshold. `DISABLING_LIMIT_FACTOR` is the factor of the maximum disabled validators in the +/// active set. E.g. setting this value to `3` means no more than 1/3 of the validators in the +/// active set can be disabled in an era. +/// By default a factor of 3 is used which is the byzantine threshold. +pub struct UpToLimitDisablingStrategy; + +impl UpToLimitDisablingStrategy { + /// Disabling limit calculated from the total number of validators in the active set. When + /// reached no more validators will be disabled. 
+ pub fn disable_limit(validators_len: usize) -> usize { + validators_len + .saturating_sub(1) + .checked_div(DISABLING_LIMIT_FACTOR) + .unwrap_or_else(|| { + defensive!("DISABLING_LIMIT_FACTOR should not be 0"); + 0 + }) + } +} + +impl DisablingStrategy + for UpToLimitDisablingStrategy +{ + fn decision( + offender_stash: &T::AccountId, + slash_era: EraIndex, + currently_disabled: &Vec, + ) -> Option { + let active_set = T::SessionInterface::validators(); + + // We don't disable more than the limit + if currently_disabled.len() >= Self::disable_limit(active_set.len()) { + log!( + debug, + "Won't disable: reached disabling limit {:?}", + Self::disable_limit(active_set.len()) + ); + return None + } + + // We don't disable for offences in previous eras + if ActiveEra::::get().map(|e| e.index).unwrap_or_default() > slash_era { + log!( + debug, + "Won't disable: current_era {:?} > slash_era {:?}", + Pallet::::current_era().unwrap_or_default(), + slash_era + ); + return None + } + + let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) { + idx as u32 + } else { + log!(debug, "Won't disable: offender not in active set",); + return None + }; + + log!(debug, "Will disable {:?}", offender_idx); + + Some(offender_idx) + } +} diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index d5b18421d5b6..510252be26c9 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -20,9 +20,10 @@ use super::*; use frame_election_provider_support::SortedListProvider; use frame_support::{ + migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, - traits::{GetStorageVersion, OnRuntimeUpgrade}, + traits::{GetStorageVersion, OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}, }; #[cfg(feature = "try-runtime")] @@ -59,11 +60,61 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, ObsoleteReleases, ValueQuery>; +/// Migrating `OffendingValidators` from `Vec<(u32, bool)>` to `Vec` +pub mod v15 { + use super::*; + + // The disabling strategy used by staking pallet + type DefaultDisablingStrategy = UpToLimitDisablingStrategy; + + pub struct VersionUncheckedMigrateV14ToV15(sp_std::marker::PhantomData); + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV14ToV15 { + fn on_runtime_upgrade() -> Weight { + let mut migrated = v14::OffendingValidators::::take() + .into_iter() + .filter(|p| p.1) // take only disabled validators + .map(|p| p.0) + .collect::>(); + + // Respect disabling limit + migrated.truncate(DefaultDisablingStrategy::disable_limit( + T::SessionInterface::validators().len(), + )); + + DisabledValidators::::set(migrated); + + log!(info, "v15 applied successfully."); + T::DbWeight::get().reads_writes(1, 1) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + frame_support::ensure!( + v14::OffendingValidators::::decode_len().is_none(), + "OffendingValidators is not empty after the migration" + ); + Ok(()) + } + } + + pub type MigrateV14ToV15 = VersionedMigration< + 14, + 15, + VersionUncheckedMigrateV14ToV15, + Pallet, + ::DbWeight, + >; +} + /// Migration of era exposure storage items to paged exposures. 
/// Changelog: [v14.](https://github.com/paritytech/substrate/blob/ankan/paged-rewards-rebased2/frame/staking/CHANGELOG.md#14) pub mod v14 { use super::*; + #[frame_support::storage_alias] + pub(crate) type OffendingValidators = + StorageValue, Vec<(u32, bool)>, ValueQuery>; + pub struct MigrateToV14(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV14 { fn on_runtime_upgrade() -> Weight { @@ -73,10 +124,10 @@ pub mod v14 { if in_code == 14 && on_chain == 13 { in_code.put::>(); - log!(info, "v14 applied successfully."); + log!(info, "staking v14 applied successfully."); T::DbWeight::get().reads_writes(1, 1) } else { - log!(warn, "v14 not applied."); + log!(warn, "staking v14 not applied."); T::DbWeight::get().reads(1) } } diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index b46b863c016e..8c60dec65a81 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -34,7 +34,7 @@ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_io; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, OnStakingUpdate, }; @@ -186,7 +186,6 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } parameter_types! { @@ -267,6 +266,9 @@ impl OnStakingUpdate for EventListenerMock { } } +// Disabling threshold for `UpToLimitDisablingStrategy` +pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; + impl crate::pallet::pallet::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; @@ -284,7 +286,6 @@ impl crate::pallet::pallet::Config for Test { type EraPayout = ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. 
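To make the factor concrete: with `DISABLING_LIMIT_FACTOR = 3` the limit computed by `UpToLimitDisablingStrategy::disable_limit` stays strictly below one third of the active set. A small standalone check of the arithmetic (illustrative only, not part of the patch):

```rust
// Mirrors the body of `disable_limit` above, specialised to the default factor of 3.
fn disable_limit(validators_len: usize) -> usize {
    validators_len.saturating_sub(1) / 3
}

fn main() {
    // The 7-validator active set used by most of the reworked tests tolerates at
    // most 2 disabled validators per era.
    assert_eq!(disable_limit(7), 2);
    // A 4-validator set tolerates only 1, which is why a single offender is the
    // most that ends up disabled in `offence_threshold_doesnt_trigger_new_era`.
    assert_eq!(disable_limit(4), 1);
}
```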
@@ -297,6 +298,7 @@ impl crate::pallet::pallet::Config for Test { type EventListeners = EventListenerMock; type BenchmarkingConfig = TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } pub struct WeightedNominationsQuota; @@ -461,6 +463,8 @@ impl ExtBuilder { (31, self.balance_factor * 2000), (41, self.balance_factor * 2000), (51, self.balance_factor * 2000), + (201, self.balance_factor * 2000), + (202, self.balance_factor * 2000), // optional nominator (100, self.balance_factor * 2000), (101, self.balance_factor * 2000), @@ -488,8 +492,10 @@ impl ExtBuilder { (31, 31, self.balance_factor * 500, StakerStatus::::Validator), // an idle validator (41, 41, self.balance_factor * 1000, StakerStatus::::Idle), - ]; - // optionally add a nominator + (51, 51, self.balance_factor * 1000, StakerStatus::::Idle), + (201, 201, self.balance_factor * 1000, StakerStatus::::Idle), + (202, 202, self.balance_factor * 1000, StakerStatus::::Idle), + ]; // optionally add a nominator if self.nominate { stakers.push(( 101, @@ -728,12 +734,11 @@ pub(crate) fn on_offence_in_era( >], slash_fraction: &[Perbill], era: EraIndex, - disable_strategy: DisableStrategy, ) { let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - let _ = Staking::on_offence(offenders, slash_fraction, start_session, disable_strategy); + let _ = Staking::on_offence(offenders, slash_fraction, start_session); return } else if bonded_era > era { break @@ -745,7 +750,6 @@ pub(crate) fn on_offence_in_era( offenders, slash_fraction, Staking::eras_start_session_index(era).unwrap(), - disable_strategy, ); } else { panic!("cannot slash in era {}", era); @@ -760,7 +764,7 @@ pub(crate) fn on_offence_now( slash_fraction: &[Perbill], ) { let now = Staking::active_era().unwrap().index; - on_offence_in_era(offenders, slash_fraction, now, DisableStrategy::WhenSlashed) + on_offence_in_era(offenders, slash_fraction, now) } pub(crate) fn add_slash(who: &AccountId) { diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 0c0ef0dbf463..f4d4a7133dd5 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -43,7 +43,7 @@ use sp_runtime::{ }; use sp_staking::{ currency_to_vote::CurrencyToVote, - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, EraIndex, OnStakingUpdate, Page, SessionIndex, Stake, StakingAccount::{self, Controller, Stash}, StakingInterface, @@ -505,10 +505,8 @@ impl Pallet { } // disable all offending validators that have been disabled for the whole era - for (index, disabled) in >::get() { - if disabled { - T::SessionInterface::disable_validator(index); - } + for index in >::get() { + T::SessionInterface::disable_validator(index); } } @@ -598,8 +596,8 @@ impl Pallet { >::insert(&active_era.index, validator_payout); T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder)); - // Clear offending validators. - >::kill(); + // Clear disabled validators. + >::kill(); } } @@ -868,14 +866,6 @@ impl Pallet { Self::deposit_event(Event::::ForceEra { mode }); } - /// Ensures that at the end of the current session there will be a new era. 
- pub(crate) fn ensure_new_era() { - match ForceEra::::get() { - Forcing::ForceAlways | Forcing::ForceNew => (), - _ => Self::set_force_era(Forcing::ForceNew), - } - } - #[cfg(feature = "runtime-benchmarks")] pub fn add_era_stakers( current_era: EraIndex, @@ -1447,7 +1437,6 @@ where >], slash_fraction: &[Perbill], slash_session: SessionIndex, - disable_strategy: DisableStrategy, ) -> Weight { let reward_proportion = SlashRewardFraction::::get(); let mut consumed_weight = Weight::from_parts(0, 0); @@ -1512,7 +1501,6 @@ where window_start, now: active_era, reward_proportion, - disable_strategy, }); Self::deposit_event(Event::::SlashReported { @@ -1986,7 +1974,8 @@ impl Pallet { Self::check_nominators()?; Self::check_exposures()?; Self::check_paged_exposures()?; - Self::check_count() + Self::check_count()?; + Self::ensure_disabled_validators_sorted() } /// Invariants: @@ -2300,4 +2289,12 @@ impl Pallet { Ok(()) } + + fn ensure_disabled_validators_sorted() -> Result<(), TryRuntimeError> { + ensure!( + DisabledValidators::::get().windows(2).all(|pair| pair[0] <= pair[1]), + "DisabledValidators is not sorted" + ); + Ok(()) + } } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 76ddad6f1359..9c968d883444 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -47,10 +47,11 @@ mod impls; pub use impls::*; use crate::{ - slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, - NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, - SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, + slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, DisablingStrategy, + EraPayout, EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, + MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, + RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, + ValidatorPrefs, }; // The speculative number of spans are used as an input of the weight annotation of @@ -67,7 +68,7 @@ pub mod pallet { use super::*; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -217,10 +218,6 @@ pub mod pallet { #[pallet::constant] type MaxExposurePageSize: Get; - /// The fraction of the validator set that is safe to be offending. - /// After the threshold is reached a new era will be forced. - type OffendingValidatorsThreshold: Get; - /// Something that provides a best-effort sorted list of voters aka electing nominators, /// used for NPoS election. /// @@ -278,6 +275,9 @@ pub mod pallet { /// WARNING: this only reports slashing and withdraw events for the time being. type EventListeners: sp_staking::OnStakingUpdate>; + // `DisablingStragegy` controls how validators are disabled + type DisablingStrategy: DisablingStrategy; + /// Some parameters of the benchmarking. 
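For downstream runtimes the net `Config` change is that `OffendingValidatorsThreshold` disappears, a `DisablingStrategy` must be supplied, and the `v15` storage migration has to be scheduled with the upgrade. A hypothetical wiring fragment (the `Runtime` and `Migrations` names are illustrative and every other associated type is elided):

```rust
impl pallet_staking::Config for Runtime {
    // ... all other associated types stay as before ...

    // Replaces the removed `OffendingValidatorsThreshold`; the default factor of 3
    // keeps disabling below the byzantine threshold.
    type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy;
}

// Run the storage migration as part of the runtime upgrade, e.g.:
pub type Migrations = (
    pallet_staking::migrations::v15::MigrateV14ToV15<Runtime>,
    // ... other pending migrations ...
);
```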
type BenchmarkingConfig: BenchmarkingConfig; @@ -654,19 +654,16 @@ pub mod pallet { #[pallet::getter(fn current_planned_session)] pub type CurrentPlannedSession = StorageValue<_, SessionIndex, ValueQuery>; - /// Indices of validators that have offended in the active era and whether they are currently - /// disabled. + /// Indices of validators that have offended in the active era. The offenders are disabled for a + /// whole era. For this reason they are kept here - only staking pallet knows about eras. The + /// implementor of [`DisablingStrategy`] defines if a validator should be disabled which + /// implicitly means that the implementor also controls the max number of disabled validators. /// - /// This value should be a superset of disabled validators since not all offences lead to the - /// validator being disabled (if there was no slash). This is needed to track the percentage of - /// validators that have offended in the current era, ensuring a new era is forced if - /// `OffendingValidatorsThreshold` is reached. The vec is always kept sorted so that we can find - /// whether a given validator has previously offended using binary search. It gets cleared when - /// the era ends. + /// The vec is always kept sorted so that we can find whether a given validator has previously + /// offended using binary search. #[pallet::storage] #[pallet::unbounded] - #[pallet::getter(fn offending_validators)] - pub type OffendingValidators = StorageValue<_, Vec<(u32, bool)>, ValueQuery>; + pub type DisabledValidators = StorageValue<_, Vec, ValueQuery>; /// The threshold for when users can start calling `chill_other` for other validators / /// nominators. The threshold is compared to the actual number of validators / nominators diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 2011e9eb8301..f831f625957d 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -50,21 +50,21 @@ //! Based on research at use crate::{ - BalanceOf, Config, Error, Exposure, NegativeImbalanceOf, NominatorSlashInEra, - OffendingValidators, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash, + BalanceOf, Config, DisabledValidators, DisablingStrategy, Error, Exposure, NegativeImbalanceOf, + NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash, ValidatorSlashInEra, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, - traits::{Currency, Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced}, + traits::{Currency, Defensive, DefensiveSaturating, Imbalance, OnUnbalanced}, }; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, DispatchResult, RuntimeDebug, }; -use sp_staking::{offence::DisableStrategy, EraIndex}; +use sp_staking::EraIndex; use sp_std::vec::Vec; /// The proportion of the slashing reward to be paid out on the first slashing detection. @@ -220,8 +220,6 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> { /// The maximum percentage of a slash that ever gets paid out. /// This is f_inf in the paper. pub(crate) reward_proportion: Perbill, - /// When to disable offenders. - pub(crate) disable_strategy: DisableStrategy, } /// Computes a slash of a validator and nominators. 
It returns an unapplied @@ -280,18 +278,13 @@ pub(crate) fn compute_slash( let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash); if target_span == Some(spans.span_index()) { - // misbehavior occurred within the current slashing span - take appropriate - // actions. - - // chill the validator - it misbehaved in the current span and should - // not continue in the next election. also end the slashing span. + // misbehavior occurred within the current slashing span - end current span. + // Check for details. spans.end_span(params.now); - >::chill_stash(params.stash); } } - let disable_when_slashed = params.disable_strategy != DisableStrategy::Never; - add_offending_validator::(params.stash, disable_when_slashed); + add_offending_validator::(¶ms); let mut nominators_slashed = Vec::new(); reward_payout += slash_nominators::(params.clone(), prior_slash_p, &mut nominators_slashed); @@ -320,54 +313,31 @@ fn kick_out_if_recent(params: SlashParams) { ); if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) { + // Check https://github.com/paritytech/polkadot-sdk/issues/2650 for details spans.end_span(params.now); - >::chill_stash(params.stash); } - let disable_without_slash = params.disable_strategy == DisableStrategy::Always; - add_offending_validator::(params.stash, disable_without_slash); + add_offending_validator::(¶ms); } -/// Add the given validator to the offenders list and optionally disable it. -/// If after adding the validator `OffendingValidatorsThreshold` is reached -/// a new era will be forced. -fn add_offending_validator(stash: &T::AccountId, disable: bool) { - OffendingValidators::::mutate(|offending| { - let validators = T::SessionInterface::validators(); - let validator_index = match validators.iter().position(|i| i == stash) { - Some(index) => index, - None => return, - }; - - let validator_index_u32 = validator_index as u32; - - match offending.binary_search_by_key(&validator_index_u32, |(index, _)| *index) { - // this is a new offending validator - Err(index) => { - offending.insert(index, (validator_index_u32, disable)); - - let offending_threshold = - T::OffendingValidatorsThreshold::get() * validators.len() as u32; - - if offending.len() >= offending_threshold as usize { - // force a new era, to select a new validator set - >::ensure_new_era() - } - - if disable { - T::SessionInterface::disable_validator(validator_index_u32); - } - }, - Ok(index) => { - if disable && !offending[index].1 { - // the validator had previously offended without being disabled, - // let's make sure we disable it now - offending[index].1 = true; - T::SessionInterface::disable_validator(validator_index_u32); - } - }, +/// Inform the [`DisablingStrategy`] implementation about the new offender and disable the list of +/// validators provided by [`make_disabling_decision`]. +fn add_offending_validator(params: &SlashParams) { + DisabledValidators::::mutate(|disabled| { + if let Some(offender) = + T::DisablingStrategy::decision(params.stash, params.slash_era, &disabled) + { + // Add the validator to `DisabledValidators` and disable it. Do nothing if it is + // already disabled. + if let Err(index) = disabled.binary_search_by_key(&offender, |index| *index) { + disabled.insert(index, offender); + T::SessionInterface::disable_validator(offender); + } } }); + + // `DisabledValidators` should be kept sorted + debug_assert!(DisabledValidators::::get().windows(2).all(|pair| pair[0] < pair[1])); } /// Slash nominators. 
Accepts general parameters and the prior slash percentage of the validator. diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 87f6fd424bd7..6cf5a56e5a6d 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -38,7 +38,7 @@ use sp_runtime::{ Perbill, Percent, Perquintill, Rounding, TokenError, }; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, SessionIndex, }; use sp_std::prelude::*; @@ -716,56 +716,65 @@ fn nominating_and_rewards_should_work() { #[test] fn nominators_also_get_slashed_pro_rata() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - let slash_percent = Perbill::from_percent(5); - let initial_exposure = Staking::eras_stakers(active_era(), &11); - // 101 is a nominator for 11 - assert_eq!(initial_exposure.others.first().unwrap().who, 101); - - // staked values; - let nominator_stake = Staking::ledger(101.into()).unwrap().active; - let nominator_balance = balances(&101).0; - let validator_stake = Staking::ledger(11.into()).unwrap().active; - let validator_balance = balances(&11).0; - let exposed_stake = initial_exposure.total; - let exposed_validator = initial_exposure.own; - let exposed_nominator = initial_exposure.others.first().unwrap().value; - - // 11 goes offline - on_offence_now( - &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], - &[slash_percent], - ); + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + + // staked values; + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], + &[slash_percent], + ); - // both stakes must have been decreased. - assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); - assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); + // both stakes must have been decreased. + assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); + assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); - let slash_amount = slash_percent * exposed_stake; - let validator_share = - Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = - Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; - // both slash amounts need to be positive for the test to make sense. - assert!(validator_share > 0); - assert!(nominator_share > 0); + // both slash amounts need to be positive for the test to make sense. 
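// (Illustrative arithmetic, not the mock's genesis numbers: with own stake 800, a single
// nominator at 200 and a 5% slash, slash_amount = 50, validator_share = 800/1000 * 50 = 40
// and nominator_share = 200/1000 * 50 = 10, so both shares are indeed positive.)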
+ assert!(validator_share > 0); + assert!(nominator_share > 0); - // both stakes must have been decreased pro-rata. - assert_eq!(Staking::ledger(101.into()).unwrap().active, nominator_stake - nominator_share); - assert_eq!(Staking::ledger(11.into()).unwrap().active, validator_stake - validator_share); - assert_eq!( - balances(&101).0, // free balance - nominator_balance - nominator_share, - ); - assert_eq!( - balances(&11).0, // free balance - validator_balance - validator_share, - ); - // Because slashing happened. - assert!(is_disabled(11)); - }); + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); + assert_eq!( + balances(&101).0, // free balance + nominator_balance - nominator_share, + ); + assert_eq!( + balances(&11).0, // free balance + validator_balance - validator_share, + ); + // Because slashing happened. + assert!(is_disabled(11)); + }); } #[test] @@ -2401,7 +2410,7 @@ fn era_is_always_same_length() { } #[test] -fn offence_forces_new_era() { +fn offence_doesnt_force_new_era() { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { @@ -2411,7 +2420,7 @@ fn offence_forces_new_era() { &[Perbill::from_percent(5)], ); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2435,26 +2444,32 @@ fn offence_ensures_new_era_without_clobbering() { #[test] fn offence_deselects_validator_even_when_slash_is_zero() { - ExtBuilder::default().build_and_execute(|| { - assert!(Session::validators().contains(&11)); - assert!(>::contains_key(11)); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + assert!(Session::validators().contains(&11)); + assert!(>::contains_key(11)); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - assert!(!>::contains_key(11)); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(is_disabled(11)); - mock::start_active_era(1); + mock::start_active_era(1); - assert!(!Session::validators().contains(&11)); - assert!(!>::contains_key(11)); - }); + // The validator should be reenabled in the new era + assert!(!is_disabled(11)); + }); } #[test] @@ -2479,71 +2494,70 @@ fn slashing_performed_according_exposure() { } #[test] -fn slash_in_old_span_does_not_deselect() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); - - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); +fn validator_is_not_disabled_for_an_offence_in_previous_era() { + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - 
assert!(!>::contains_key(11)); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); - mock::start_active_era(2); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); - Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); - assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(!Session::validators().contains(&11)); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(is_disabled(11)); - mock::start_active_era(3); + mock::start_active_era(2); - // this staker is in a new slashing span now, having re-registered after - // their prior slash. + // the validator is not disabled in the new era + Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - 1, - DisableStrategy::WhenSlashed, - ); + mock::start_active_era(3); - // the validator doesn't get chilled again - assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + // an offence committed in era 1 is reported in era 3 + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + 1, + ); - // but we are still forcing a new era - assert_eq!(Staking::force_era(), Forcing::ForceNew); + // the validator doesn't get disabled for an old offence + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - // NOTE: A 100% slash here would clean up the account, causing de-registration. - &[Perbill::from_percent(95)], - 1, - DisableStrategy::WhenSlashed, - ); + // and we are not forcing a new era + assert_eq!(Staking::force_era(), Forcing::NotForcing); - // the validator doesn't get chilled again - assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + // NOTE: A 100% slash here would clean up the account, causing de-registration. + &[Perbill::from_percent(95)], + 1, + ); - // but it's disabled - assert!(is_disabled(11)); - // and we are still forcing a new era - assert_eq!(Staking::force_era(), Forcing::ForceNew); - }); + // the validator doesn't get disabled again + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); + // and we are still not forcing a new era + assert_eq!(Staking::force_era(), Forcing::NotForcing); + }); } #[test] @@ -2671,7 +2685,7 @@ fn dont_slash_if_fraction_is_zero() { // The validator hasn't been slashed. The new era is not forced. assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2692,7 +2706,7 @@ fn only_slash_for_max_in_era() { // The validator has been slashed and has been force-chilled. 
assert_eq!(Balances::free_balance(11), 500); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); on_offence_now( &[OffenceDetails { @@ -2833,7 +2847,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(10)], 2, - DisableStrategy::WhenSlashed, ); assert_eq!(Balances::free_balance(11), 900); @@ -2860,7 +2873,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(30)], 3, - DisableStrategy::WhenSlashed, ); // 11 was not further slashed, but 21 and 101 were. @@ -2882,7 +2894,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(20)], 2, - DisableStrategy::WhenSlashed, ); // 11 was further slashed, but 21 and 101 were not. @@ -2999,11 +3010,8 @@ fn deferred_slashes_are_deferred() { assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, Event::SlashReported { validator: 11, slash_era: 1, .. }, Event::StakersElected, - Event::ForceEra { mode: Forcing::NotForcing }, .., Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 } @@ -3029,7 +3037,6 @@ fn retroactive_deferred_slashes_two_eras_before() { &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 1, // should be deferred for two full eras, and applied at the beginning of era 4. - DisableStrategy::Never, ); mock::start_active_era(4); @@ -3037,8 +3044,6 @@ fn retroactive_deferred_slashes_two_eras_before() { assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, Event::SlashReported { validator: 11, slash_era: 1, .. }, .., Event::Slashed { staker: 11, amount: 100 }, @@ -3067,7 +3072,6 @@ fn retroactive_deferred_slashes_one_before() { &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 2, // should be deferred for two full eras, and applied at the beginning of era 5. 
- DisableStrategy::Never, ); mock::start_active_era(4); @@ -3197,7 +3201,6 @@ fn remove_deferred() { &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(15)], 1, - DisableStrategy::WhenSlashed, ); // fails if empty @@ -3312,192 +3315,198 @@ fn remove_multi_deferred() { #[test] fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - - // pre-slash balance - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - // 100 has approval for 11 as of now - assert!(Staking::nominators(101).unwrap().targets.contains(&11)); + // pre-slash balance + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // 11 and 21 both have the support of 100 - let exposure_11 = Staking::eras_stakers(active_era(), &11); - let exposure_21 = Staking::eras_stakers(active_era(), &21); + // 100 has approval for 11 as of now + assert!(Staking::nominators(101).unwrap().targets.contains(&11)); - assert_eq!(exposure_11.total, 1000 + 125); - assert_eq!(exposure_21.total, 1000 + 375); + // 11 and 21 both have the support of 100 + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::from_percent(10)], - ); + assert_eq!(exposure_11.total, 1000 + 125); + assert_eq!(exposure_21.total, 1000 + 375); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(10), - slash_era: 1 - }, - Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 }, - ] - ); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); - // post-slash balance - let nominator_slash_amount_11 = 125 / 10; - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(10), + slash_era: 1 + }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, + ] + ); - // check that validator was chilled. 
- assert!(Validators::::iter().all(|(stash, _)| stash != 11)); + // post-slash balance + let nominator_slash_amount_11 = 125 / 10; + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); - // actually re-bond the slashed validator - assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); + // check that validator was disabled. + assert!(is_disabled(11)); - mock::start_active_era(2); - let exposure_11 = Staking::eras_stakers(active_era(), &11); - let exposure_21 = Staking::eras_stakers(active_era(), &21); + // actually re-bond the slashed validator + assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); - // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 - // 900 + 146 - assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); - // 1000 + 342 - assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); - assert_eq!(500 - 146 - 342, nominator_slash_amount_11); - }); + mock::start_active_era(2); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + + // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 + // 900 + 146 + assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); + // 1000 + 342 + assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); + assert_eq!(500 - 146 - 342, nominator_slash_amount_11); + }); } #[test] -fn non_slashable_offence_doesnt_disable_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); +fn non_slashable_offence_disables_validator() { + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - // offence with no slash associated - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - ); + // offence with no slash associated + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); - // it does NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // it does NOT affect the nominator. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond - on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(25)], - ); + // offence that slashes 25% of the bond + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); - // it DOES NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // it DOES NOT affect the nominator. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(0), - slash_era: 1 - }, - Event::Chilled { stash: 21 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(25), - slash_era: 1 - }, - Event::Slashed { staker: 21, amount: 250 }, - Event::Slashed { staker: 101, amount: 94 } - ] - ); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); - // the offence for validator 10 wasn't slashable so it wasn't disabled - assert!(!is_disabled(11)); - // whereas validator 20 gets disabled - assert!(is_disabled(21)); - }); + // the offence for validator 11 wasn't slashable but it is disabled + assert!(is_disabled(11)); + // validator 21 gets disabled too + assert!(is_disabled(21)); + }); } #[test] fn slashing_independent_of_disabling_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); + ExtBuilder::default() + .validator_count(5) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let now = Staking::active_era().unwrap().index; + let now = Staking::active_era().unwrap().index; - // offence with no slash associated, BUT disabling - on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - now, - DisableStrategy::Always, - ); + // offence with no slash associated + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + now, + ); - // nomination remains untouched. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // nomination remains untouched. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond, BUT not disabling - on_offence_in_era( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(25)], - now, - DisableStrategy::Never, - ); + // offence that slashes 25% of the bond + on_offence_in_era( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + now, + ); - // nomination remains untouched. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // nomination remains untouched. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(0), - slash_era: 1 - }, - Event::Chilled { stash: 21 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(25), - slash_era: 1 - }, - Event::Slashed { staker: 21, amount: 250 }, - Event::Slashed { staker: 101, amount: 94 } - ] - ); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); - // the offence for validator 10 was explicitly disabled - assert!(is_disabled(11)); - // whereas validator 21 is explicitly not disabled - assert!(!is_disabled(21)); - }); + // first validator is disabled but not slashed + assert!(is_disabled(11)); + // second validator is slashed but not disabled + assert!(!is_disabled(21)); + }); } #[test] -fn offence_threshold_triggers_new_era() { +fn offence_threshold_doesnt_trigger_new_era() { ExtBuilder::default() .validator_count(4) .set_status(41, StakerStatus::Validator) @@ -3506,12 +3515,14 @@ fn offence_threshold_triggers_new_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); assert_eq!( - ::OffendingValidatorsThreshold::get(), - Perbill::from_percent(75), + UpToLimitDisablingStrategy::::disable_limit( + Session::validators().len() + ), + 1 ); - // we have 4 validators and an offending validator threshold of 75%, - // once the third validator commits an offence a new era should be forced + // we have 4 validators and an offending validator threshold of 1/3, + // even if the third validator commits an offence a new era should not be forced let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); @@ -3522,6 +3533,9 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); + // 11 should be disabled because the byzantine threshold is 1 + assert!(is_disabled(11)); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( @@ -3529,6 +3543,10 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); + // 21 should not be disabled because the number of disabled validators will be above the + // byzantine threshold + assert!(!is_disabled(21)); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( @@ -3536,28 +3554,29 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); - assert_eq!(ForceEra::::get(), Forcing::ForceNew); + // same for 31 + assert!(!is_disabled(31)); + + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); } #[test] fn disabled_validators_are_kept_disabled_for_whole_era() { ExtBuilder::default() - .validator_count(4) + .validator_count(7) .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) .build_and_execute(|| { mock::start_active_era(1); - 
assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - ); - on_offence_now( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3566,18 +3585,15 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { // nominations are not updated. assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // validator 11 should not be disabled since the offence wasn't slashable - assert!(!is_disabled(11)); // validator 21 gets disabled since it got slashed assert!(is_disabled(21)); advance_session(); // disabled validators should carry-on through all sessions in the era - assert!(!is_disabled(11)); assert!(is_disabled(21)); - // validator 11 should now get disabled + // validator 11 commits an offence on_offence_now( &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3687,27 +3703,34 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { #[test] fn zero_slash_keeps_nominators() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), &11); - assert_eq!(Balances::free_balance(101), 2000); + let exposure = Staking::eras_stakers(active_era(), &11); + assert_eq!(Balances::free_balance(101), 2000); - on_offence_now( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], - &[Perbill::from_percent(0)], - ); + on_offence_now( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(0)], + ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // 11 is still removed.. - assert!(Validators::::iter().all(|(stash, _)| stash != 11)); - // but their nominations are kept. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - }); + // 11 is not removed but disabled + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(is_disabled(11)); + // and their nominations are kept. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + }); } #[test] @@ -4710,7 +4733,7 @@ fn offences_weight_calculated_correctly() { let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); assert_eq!( - Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), + Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), zero_offence_weight ); @@ -4735,7 +4758,6 @@ fn offences_weight_calculated_correctly() { &offenders, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed ), n_offence_unapplied_weight ); @@ -4765,7 +4787,6 @@ fn offences_weight_calculated_correctly() { &one_offender, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed{} ), one_offence_unapplied_weight ); @@ -7011,62 +7032,71 @@ mod staking_unchecked { #[test] fn virtual_nominators_are_lazily_slashed() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - let slash_percent = Perbill::from_percent(5); - let initial_exposure = Staking::eras_stakers(active_era(), &11); - // 101 is a nominator for 11 - assert_eq!(initial_exposure.others.first().unwrap().who, 101); - // make 101 a virtual nominator - ::migrate_to_virtual_staker(&101); - // set payee different to self. - assert_ok!(::update_payee(&101, &102)); - - // cache values - let nominator_stake = Staking::ledger(101.into()).unwrap().active; - let nominator_balance = balances(&101).0; - let validator_stake = Staking::ledger(11.into()).unwrap().active; - let validator_balance = balances(&11).0; - let exposed_stake = initial_exposure.total; - let exposed_validator = initial_exposure.own; - let exposed_nominator = initial_exposure.others.first().unwrap().value; - - // 11 goes offline - on_offence_now( - &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], - &[slash_percent], - ); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + ::migrate_to_virtual_staker(&101); + // set payee different to self. 
+ assert_ok!(::update_payee(&101, &102)); + + // cache values + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { + offender: (11, initial_exposure.clone()), + reporters: vec![], + }], + &[slash_percent], + ); - let slash_amount = slash_percent * exposed_stake; - let validator_share = - Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = - Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; - // both slash amounts need to be positive for the test to make sense. - assert!(validator_share > 0); - assert!(nominator_share > 0); + // both slash amounts need to be positive for the test to make sense. + assert!(validator_share > 0); + assert!(nominator_share > 0); - // both stakes must have been decreased pro-rata. - assert_eq!( - Staking::ledger(101.into()).unwrap().active, - nominator_stake - nominator_share - ); - assert_eq!( - Staking::ledger(11.into()).unwrap().active, - validator_stake - validator_share - ); + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); - // validator balance is slashed as usual - assert_eq!(balances(&11).0, validator_balance - validator_share); - // Because slashing happened. - assert!(is_disabled(11)); + // validator balance is slashed as usual + assert_eq!(balances(&11).0, validator_balance - validator_share); + // Because slashing happened. + assert!(is_disabled(11)); - // but virtual nominator's balance is not slashed. - assert_eq!(Balances::free_balance(&101), nominator_balance); - // but slash is broadcasted to slash observers. - assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); - }) + // but virtual nominator's balance is not slashed. + assert_eq!(Balances::free_balance(&101), nominator_balance); + // but slash is broadcasted to slash observers. 
+ assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); + }) } } mod ledger { @@ -7926,3 +7956,69 @@ mod ledger_recovery { }) } } + +mod byzantine_threshold_disabling_strategy { + use crate::{ + tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, UpToLimitDisablingStrategy, + }; + use sp_staking::EraIndex; + + // Common test data - the stash of the offending validator, the era of the offence and the + // active set + const OFFENDER_ID: ::AccountId = 7; + const SLASH_ERA: EraIndex = 1; + const ACTIVE_SET: [::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7]; + const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set + + #[test] + fn dont_disable_for_ancient_offence() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + ActiveEra::::put(ActiveEraInfo { index: 2, start: None }); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disable_offender.is_none()); + }); + } + + #[test] + fn dont_disable_beyond_byzantine_threshold() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![1, 2]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disable_offender.is_none()); + }); + } + + #[test] + fn disable_when_below_byzantine_threshold() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![1]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert_eq!(disable_offender, Some(OFFENDER_VALIDATOR_IDX)); + }); + } +} diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 30d96d0cbafc..2c2ebc1fc971 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -37,29 +37,6 @@ pub type Kind = [u8; 16]; /// so that we can slash it accordingly. pub type OffenceCount = u32; -/// In case of an offence, which conditions get an offending validator disabled. -#[derive( - Clone, - Copy, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - Encode, - Decode, - sp_runtime::RuntimeDebug, - scale_info::TypeInfo, -)] -pub enum DisableStrategy { - /// Independently of slashing, this offence will not disable the offender. - Never, - /// Only disable the offender if it is also slashed. - WhenSlashed, - /// Independently of slashing, this offence will always disable the offender. - Always, -} - /// A trait implemented by an offence report. /// /// This trait assumes that the offence is legitimate and was validated already. @@ -102,11 +79,6 @@ pub trait Offence { /// number. Note that for GRANDPA the round number is reset each epoch. fn time_slot(&self) -> Self::TimeSlot; - /// In which cases this offence needs to disable offenders until the next era starts. - fn disable_strategy(&self) -> DisableStrategy { - DisableStrategy::WhenSlashed - } - /// A slash fraction of the total exposure that should be slashed for this /// particular offence for the `offenders_count` that happened at a singular `TimeSlot`. /// @@ -177,15 +149,12 @@ pub trait OnOffenceHandler { /// /// The `session` parameter is the session index of the offence. /// - /// The `disable_strategy` parameter decides if the offenders need to be disabled immediately. 
- /// /// The receiver might decide to not accept this offence. In this case, the call site is /// responsible for queuing the report and re-submitting again. fn on_offence( offenders: &[OffenceDetails], slash_fraction: &[Perbill], session: SessionIndex, - disable_strategy: DisableStrategy, ) -> Res; } @@ -194,7 +163,6 @@ impl OnOffenceHandler _offenders: &[OffenceDetails], _slash_fraction: &[Perbill], _session: SessionIndex, - _disable_strategy: DisableStrategy, ) -> Res { Default::default() } From d893cde2cfd1992a3e589614ae09088d92f28a59 Mon Sep 17 00:00:00 2001 From: Ron Date: Fri, 26 Apr 2024 23:51:58 +0800 Subject: [PATCH 16/27] Snowbridge: deposit extra fee to beneficiary on Asset Hub (#4175) Just the upper-stream for https://github.com/Snowfork/polkadot-sdk/pull/137 and more context there. --------- Co-authored-by: Clara van Staden Co-authored-by: Adrian Catangiu --- .../primitives/router/src/inbound/mod.rs | 6 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 133 +++++++++++++++++- prdoc/pr_4175.prdoc | 13 ++ 3 files changed, 149 insertions(+), 3 deletions(-) create mode 100644 prdoc/pr_4175.prdoc diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index c20554c6d184..54e47a7a8b6a 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -273,8 +273,10 @@ where }, None => { instructions.extend(vec![ - // Deposit asset to beneficiary. - DepositAsset { assets: Definite(asset.into()), beneficiary }, + // Deposit both asset and fees to beneficiary so the fees will not get + // trapped. Another benefit is when fees left more than ED on AssetHub could be + // used to create the beneficiary account in case it does not exist. 
+ DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, ]); }, } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index d0c02e611349..1c1c51404aa4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -27,7 +27,7 @@ use snowbridge_pallet_inbound_queue_fixtures::{ }; use snowbridge_pallet_system; use snowbridge_router_primitives::inbound::{ - Command, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, + Command, Destination, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, }; use sp_core::H256; use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; @@ -40,6 +40,7 @@ const TREASURY_ACCOUNT: [u8; 32] = const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const INSUFFICIENT_XCM_FEE: u128 = 1000; +const XCM_FEE: u128 = 4_000_000_000; #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum ControlCall { @@ -555,3 +556,133 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { ); }); } + +fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) { + let weth_asset_location: Location = Location::new( + 2, + [EthereumNetwork::get().into(), AccountKey20 { network: None, key: WETH }], + ); + // (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }) + // Fund asset hub sovereign on bridge hub + let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( + 1, + [Parachain(AssetHubRococo::para_id().into())], + )); + BridgeHubRococo::fund_accounts(vec![(asset_hub_sovereign.clone(), INITIAL_FUND)]); + + // Register WETH + AssetHubRococo::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + weth_asset_location.clone().try_into().unwrap(), + asset_hub_sovereign.into(), + false, + 1, + )); + + assert!(::ForeignAssets::asset_exists( + weth_asset_location.clone().try_into().unwrap(), + )); + }); + + // Send WETH to an existent account on asset hub + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message_id: H256 = [0; 32].into(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::AccountId32 { id: account_id }, + amount: 1_000_000, + fee, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap(); + assert_ok!(EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into())); + + // Check that the message was sent + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_existent_account_on_asset_hub() { + send_token_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the token was received and issued as a foreign asset on AssetHub + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() { + send_token_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the token was received and issued as a foreign asset on AssetHub + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() { + send_token_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the message was not processed successfully due to insufficient fee + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed( +) { + // On AH the xcm fee is 33_873_024 and the ED is 3_300_000 + send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 36_000_000); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the message was not processed successfully due to insufficient ED + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {}, + ] + ); + }); +} diff --git a/prdoc/pr_4175.prdoc b/prdoc/pr_4175.prdoc new file mode 100644 index 000000000000..7fc2fb68b38e --- /dev/null +++ b/prdoc/pr_4175.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Snowbridge: deposit extra fee to beneficiary on Asset Hub" + +doc: + - audience: Runtime Dev + description: | + Snowbridge transfers arriving on Asset Hub will deposit both asset and fees to beneficiary so the fees will not get trapped. + Another benefit is when fees left more than ED, could be used to create the beneficiary account in case it does not exist on asset hub. 
+ +crates: + - name: snowbridge-router-primitives From 2a497d297575947b613fe0f3bbac9273a48fd6b0 Mon Sep 17 00:00:00 2001 From: antiyro <74653697+antiyro@users.noreply.github.com> Date: Fri, 26 Apr 2024 18:23:58 +0200 Subject: [PATCH 17/27] fix(seal): shameless fix on sealing typo (#4304) --- substrate/client/consensus/manual-seal/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index c3d360f07197..8fc7e7ecab2f 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -86,7 +86,7 @@ where BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry) } -/// Params required to start the instant sealing authorship task. +/// Params required to start the manual sealing authorship task. pub struct ManualSealParams, TP, SC, CS, CIDP, P> { /// Block import instance. pub block_import: BI, @@ -114,7 +114,7 @@ pub struct ManualSealParams, TP, SC, C pub create_inherent_data_providers: CIDP, } -/// Params required to start the manual sealing authorship task. +/// Params required to start the instant sealing authorship task. pub struct InstantSealParams, TP, SC, CIDP, P> { /// Block import instance for well. importing blocks. pub block_import: BI, From 73b9a8391fa0b18308fa35f905e31cec77f5618f Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Sun, 28 Apr 2024 14:35:51 +0200 Subject: [PATCH 18/27] [Staking] Runtime api if era rewards are pending to be claimed (#4301) closes https://github.com/paritytech/polkadot-sdk/issues/426. related to https://github.com/paritytech/polkadot-sdk/pull/1189. Would help offchain programs to query if there are unclaimed pages of rewards for a given era. The logic could look like below ```js // loop as long as all era pages are claimed. while (api.call.stakingApi.pendingRewards(era, validator_stash)) { api.tx.staking.payout_stakers(validator_stash, era) } ``` --- polkadot/runtime/westend/src/lib.rs | 4 + prdoc/pr_4301.prdoc | 13 +++ substrate/bin/node/runtime/src/lib.rs | 4 + .../frame/staking/runtime-api/src/lib.rs | 5 +- substrate/frame/staking/src/lib.rs | 28 ++++- substrate/frame/staking/src/pallet/impls.rs | 4 + substrate/frame/staking/src/tests.rs | 107 ++++++++++++++++++ 7 files changed, 163 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_4301.prdoc diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 03ecd5c070b2..de961bb4c398 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2250,6 +2250,10 @@ sp_api::impl_runtime_apis! { fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { Staking::api_eras_stakers_page_count(era, account) } + + fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool { + Staking::api_pending_rewards(era, account) + } } #[cfg(feature = "try-runtime")] diff --git a/prdoc/pr_4301.prdoc b/prdoc/pr_4301.prdoc new file mode 100644 index 000000000000..2ca2534243a8 --- /dev/null +++ b/prdoc/pr_4301.prdoc @@ -0,0 +1,13 @@ +title: New runtime api to check if a validator has pending pages of rewards for an era. + +doc: + - audience: + - Node Dev + - Runtime User + description: | + Creates a new runtime api to check if reward for an era is pending for a validator. 
Era rewards are paged and this + api will return true as long as there is one or more pages of era reward which are not claimed. + +crates: +- name: pallet-staking +- name: pallet-staking-runtime-api diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 0caaa8c73226..5d8016532a5d 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2791,6 +2791,10 @@ impl_runtime_apis! { fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { Staking::api_eras_stakers_page_count(era, account) } + + fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool { + Staking::api_pending_rewards(era, account) + } } impl sp_consensus_babe::BabeApi for Runtime { diff --git a/substrate/frame/staking/runtime-api/src/lib.rs b/substrate/frame/staking/runtime-api/src/lib.rs index b04c383a077d..7955f4184a43 100644 --- a/substrate/frame/staking/runtime-api/src/lib.rs +++ b/substrate/frame/staking/runtime-api/src/lib.rs @@ -30,7 +30,10 @@ sp_api::decl_runtime_apis! { /// Returns the nominations quota for a nominator with a given balance. fn nominations_quota(balance: Balance) -> u32; - /// Returns the page count of exposures for a validator in a given era. + /// Returns the page count of exposures for a validator `account` in a given era. fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page; + + /// Returns true if validator `account` has pages to be claimed for the given era. + fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool; } } diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 047ad6b87cc1..692e62acfdff 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -1035,11 +1035,37 @@ where /// can and add more functions to it as needed. pub struct EraInfo(sp_std::marker::PhantomData); impl EraInfo { + /// Returns true if validator has one or more page of era rewards not claimed yet. + // Also looks at legacy storage that can be cleaned up after #433. + pub fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool { + let page_count = if let Some(overview) = >::get(&era, validator) { + overview.page_count + } else { + if >::contains_key(era, validator) { + // this means non paged exposure, and we treat them as single paged. + 1 + } else { + // if no exposure, then no rewards to claim. + return false + } + }; + + // check if era is marked claimed in legacy storage. + if >::get(validator) + .map(|l| l.legacy_claimed_rewards.contains(&era)) + .unwrap_or_default() + { + return false + } + + ClaimedRewards::::get(era, validator).len() < page_count as usize + } + /// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy /// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be /// removed once `T::HistoryDepth` eras have passed and none of the older non-paged rewards /// are relevant/claimable. 
- // Refer tracker issue for cleanup: #13034 + // Refer tracker issue for cleanup: https://github.com/paritytech/polkadot-sdk/issues/433 pub(crate) fn is_rewards_claimed_with_legacy_fallback( era: EraIndex, ledger: &StakingLedger, diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index f4d4a7133dd5..4eb24311ab34 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -1183,6 +1183,10 @@ impl Pallet { pub fn api_eras_stakers_page_count(era: EraIndex, account: T::AccountId) -> Page { EraInfo::::get_page_count(era, &account) } + + pub fn api_pending_rewards(era: EraIndex, account: T::AccountId) -> bool { + EraInfo::::pending_rewards(era, &account) + } } impl ElectionDataProvider for Pallet { diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 6cf5a56e5a6d..d05752f54be7 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -6796,6 +6796,113 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( }); } +#[test] +fn test_runtime_api_pending_rewards() { + ExtBuilder::default().build_and_execute(|| { + // GIVEN + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + let stake = 100; + + // validator with non-paged exposure, rewards marked in legacy claimed rewards. + let validator_one = 301; + // validator with non-paged exposure, rewards marked in paged claimed rewards. + let validator_two = 302; + // validator with paged exposure. + let validator_three = 303; + + // Set staker + for v in validator_one..=validator_three { + let _ = Balances::make_free_balance_be(&v, stake); + assert_ok!(Staking::bond(RuntimeOrigin::signed(v), stake, RewardDestination::Staked)); + } + + // Add reward points + let reward = EraRewardPoints:: { + total: 1, + individual: vec![(validator_one, 1), (validator_two, 1), (validator_three, 1)] + .into_iter() + .collect(), + }; + ErasRewardPoints::::insert(0, reward); + + // build exposure + let mut individual_exposures: Vec> = vec![]; + for i in 0..=MaxExposurePageSize::get() { + individual_exposures.push(IndividualExposure { who: i.into(), value: stake }); + } + let exposure = Exposure:: { + total: stake * (MaxExposurePageSize::get() as Balance + 2), + own: stake, + others: individual_exposures, + }; + + // add non-paged exposure for one and two. + >::insert(0, validator_one, exposure.clone()); + >::insert(0, validator_two, exposure.clone()); + // add paged exposure for third validator + EraInfo::::set_exposure(0, &validator_three, exposure); + + // add some reward to be distributed + ErasValidatorReward::::insert(0, 1000); + + // mark rewards claimed for validator_one in legacy claimed rewards + >::insert( + validator_one, + StakingLedgerInspect { + stash: validator_one, + total: stake, + active: stake, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![0], + }, + ); + + // SCENARIO ONE: rewards already marked claimed in legacy storage. + // runtime api should return false for pending rewards for validator_one. + assert!(!EraInfo::::pending_rewards(0, &validator_one)); + // and if we try to pay, we get an error. + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // SCENARIO TWO: non-paged exposure + // validator two has not claimed rewards, so pending rewards is true. 
+ assert!(EraInfo::::pending_rewards(0, &validator_two)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0)); + // now pending rewards is false. + assert!(!EraInfo::::pending_rewards(0, &validator_two)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // SCENARIO THREE: validator with paged exposure (two pages). + // validator three has not claimed rewards, so pending rewards is true. + assert!(EraInfo::::pending_rewards(0, &validator_three)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // validator three has two pages of exposure, so pending rewards is still true. + assert!(EraInfo::::pending_rewards(0, &validator_three)); + // payout again + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // now pending rewards is false. + assert!(!EraInfo::::pending_rewards(0, &validator_three)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // for eras with no exposure, pending rewards is false. + assert!(!EraInfo::::pending_rewards(0, &validator_one)); + assert!(!EraInfo::::pending_rewards(0, &validator_two)); + assert!(!EraInfo::::pending_rewards(0, &validator_three)); + }); +} + mod staking_interface { use frame_support::storage::with_storage_layer; use sp_staking::StakingInterface; From 954150f3b5fdb7d07d1ed01b138e2025245bb227 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Sun, 28 Apr 2024 16:29:21 +0100 Subject: [PATCH 19/27] remove unnessisary use statements due to 2021 core prelude (#4183) Some traits are already included in the 2021 prelude and so shouldn't be needed to use explicitly: use `convert::TryFrom`, `convert::TryInto`, and `iter::FromIterator` are removed. ( https://doc.rust-lang.org/core/prelude/rust_2021/ ) No breaking changes or change of functionality, so I think no PR doc is needed in this case. 
(Motivation: Removes some references to `sp-std`) --- bridges/bin/runtime-common/src/messages.rs | 2 +- bridges/modules/grandpa/src/lib.rs | 2 +- bridges/primitives/runtime/src/chain.rs | 2 +- bridges/primitives/runtime/src/lib.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_lane.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_metrics.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_target.rs | 2 +- bridges/snowbridge/pallets/inbound-queue/src/envelope.rs | 2 +- bridges/snowbridge/pallets/inbound-queue/src/lib.rs | 2 +- bridges/snowbridge/primitives/beacon/src/bits.rs | 2 +- bridges/snowbridge/primitives/beacon/src/serde_utils.rs | 2 +- bridges/snowbridge/primitives/ethereum/src/header.rs | 2 +- bridges/snowbridge/primitives/ethereum/src/mpt.rs | 2 +- bridges/snowbridge/runtime/test-common/Cargo.toml | 2 +- cumulus/client/consensus/aura/src/collator.rs | 2 +- cumulus/client/consensus/aura/src/collators/basic.rs | 2 +- cumulus/client/consensus/aura/src/collators/lookahead.rs | 2 +- cumulus/client/network/src/lib.rs | 2 +- cumulus/parachains/pallets/collective-content/Cargo.toml | 2 +- polkadot/node/core/bitfield-signing/src/lib.rs | 2 +- polkadot/node/network/bitfield-distribution/src/tests.rs | 2 +- .../node/network/collator-protocol/src/collator_side/mod.rs | 1 - .../network/collator-protocol/src/validator_side/mod.rs | 2 -- .../network/statement-distribution/src/legacy_v1/tests.rs | 2 +- .../subsystem-types/src/messages/network_bridge_event.rs | 2 +- polkadot/xcm/src/v3/junction.rs | 1 - polkadot/xcm/src/v3/junctions.rs | 2 +- polkadot/xcm/src/v3/mod.rs | 6 +----- polkadot/xcm/src/v3/multiasset.rs | 5 +---- polkadot/xcm/src/v3/multilocation.rs | 6 +----- polkadot/xcm/src/v4/asset.rs | 5 +---- polkadot/xcm/src/v4/junction.rs | 1 - polkadot/xcm/src/v4/junctions.rs | 2 +- polkadot/xcm/src/v4/location.rs | 6 +----- polkadot/xcm/src/v4/mod.rs | 6 +----- polkadot/xcm/xcm-builder/src/tests/mod.rs | 1 - substrate/client/consensus/grandpa/rpc/src/lib.rs | 2 +- substrate/client/consensus/grandpa/src/environment.rs | 1 - substrate/client/mixnet/Cargo.toml | 2 +- .../network/src/protocol/notifications/upgrade/collec.rs | 1 - substrate/frame/Cargo.toml | 2 +- substrate/frame/alliance/src/benchmarking.rs | 6 +----- substrate/frame/alliance/src/lib.rs | 2 +- substrate/frame/alliance/src/mock.rs | 1 - substrate/frame/alliance/src/types.rs | 2 +- substrate/frame/examples/frame-crate/Cargo.toml | 2 +- substrate/frame/mixnet/Cargo.toml | 2 +- substrate/frame/node-authorization/src/lib.rs | 2 +- substrate/frame/safe-mode/src/lib.rs | 1 - substrate/frame/sassafras/Cargo.toml | 2 +- substrate/frame/transaction-payment/rpc/src/lib.rs | 2 +- substrate/frame/tx-pause/src/lib.rs | 2 +- substrate/primitives/consensus/sassafras/Cargo.toml | 2 +- substrate/primitives/core/fuzz/Cargo.toml | 1 + substrate/primitives/mixnet/Cargo.toml | 2 +- substrate/primitives/state-machine/src/basic.rs | 1 - 56 files changed, 46 insertions(+), 82 deletions(-) diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs index 4aca53f3b983..0fe9935dbdb6 100644 --- a/bridges/bin/runtime-common/src/messages.rs +++ b/bridges/bin/runtime-common/src/messages.rs @@ -35,7 +35,7 @@ use frame_support::{traits::Get, weights::Weight}; use hash_db::Hasher; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryFrom, marker::PhantomData, vec::Vec}; +use sp_std::{marker::PhantomData, vec::Vec}; /// Bidirectional message bridge. 
pub trait MessageBridge { diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index cb536eb07ff6..efcbfb1654b3 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -49,7 +49,7 @@ use sp_runtime::{ traits::{Header as HeaderT, Zero}, SaturatedConversion, }; -use sp_std::{boxed::Box, convert::TryInto, prelude::*}; +use sp_std::{boxed::Box, prelude::*}; mod call_ext; #[cfg(test)] diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 1b1c623104f9..369386e41b0c 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, FixedPointOperand, }; -use sp_std::{convert::TryFrom, fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; +use sp_std::{fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; /// Chain call, that is either SCALE-encoded, or decoded. #[derive(Debug, Clone, PartialEq)] diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index c9c5c9412913..5daba0351ad4 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -31,7 +31,7 @@ use sp_runtime::{ traits::{BadOrigin, Header as HeaderT, UniqueSaturatedInto}, RuntimeDebug, }; -use sp_std::{convert::TryFrom, fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; +use sp_std::{fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; pub use chain::{ AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index abeab8c1402d..58e9ded312df 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -46,7 +46,7 @@ use relay_utils::{ }; use sp_core::Pair; use sp_runtime::traits::Zero; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData}; /// Substrate -> Substrate messages synchronization pipeline. pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { diff --git a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs index 27bf6186c3ba..b30e75bd8bac 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs @@ -32,7 +32,7 @@ use relay_substrate_client::{ use relay_utils::metrics::{MetricsParams, StandaloneMetric}; use sp_core::storage::StorageData; use sp_runtime::{FixedPointNumber, FixedU128}; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. pub async fn add_relay_balances_metrics( diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs index 9396e785530d..633b11f0b802 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -45,7 +45,7 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; -use std::{convert::TryFrom, ops::RangeInclusive}; +use std::ops::RangeInclusive; /// Message receiving proof returned by the target Substrate node. 
pub type SubstrateMessagesDeliveryProof = diff --git a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs index 826d535c2cb9..31a8992442d8 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs @@ -3,7 +3,7 @@ use snowbridge_core::{inbound::Log, ChannelId}; use sp_core::{RuntimeDebug, H160, H256}; -use sp_std::{convert::TryFrom, prelude::*}; +use sp_std::prelude::*; use alloy_primitives::B256; use alloy_sol_types::{sol, SolEvent}; diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs index 8acbb0c2916e..4a1486204eb0 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs @@ -50,7 +50,7 @@ use frame_system::ensure_signed; use scale_info::TypeInfo; use sp_core::{H160, H256}; use sp_runtime::traits::Zero; -use sp_std::{convert::TryFrom, vec}; +use sp_std::vec; use xcm::prelude::{ send_xcm, Instruction::SetTopic, Junction::*, Location, SendError as XcmpSendError, SendXcm, Xcm, XcmContext, XcmHash, diff --git a/bridges/snowbridge/primitives/beacon/src/bits.rs b/bridges/snowbridge/primitives/beacon/src/bits.rs index 72b7135ee293..fb03588cf8b7 100644 --- a/bridges/snowbridge/primitives/beacon/src/bits.rs +++ b/bridges/snowbridge/primitives/beacon/src/bits.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; use ssz_rs::{Bitvector, Deserialize}; pub fn decompress_sync_committee_bits< diff --git a/bridges/snowbridge/primitives/beacon/src/serde_utils.rs b/bridges/snowbridge/primitives/beacon/src/serde_utils.rs index 07f5cbe724ed..5e39ff912257 100644 --- a/bridges/snowbridge/primitives/beacon/src/serde_utils.rs +++ b/bridges/snowbridge/primitives/beacon/src/serde_utils.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Deserializer}; // helper to deserialize arbitrary arrays like [T; N] pub mod arrays { - use std::{convert::TryInto, marker::PhantomData}; + use std::marker::PhantomData; use serde::{ de::{SeqAccess, Visitor}, diff --git a/bridges/snowbridge/primitives/ethereum/src/header.rs b/bridges/snowbridge/primitives/ethereum/src/header.rs index f0b51f8c79de..48fa179fe4fa 100644 --- a/bridges/snowbridge/primitives/ethereum/src/header.rs +++ b/bridges/snowbridge/primitives/ethereum/src/header.rs @@ -8,7 +8,7 @@ use rlp::RlpStream; use scale_info::TypeInfo; use sp_io::hashing::keccak_256; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; diff --git a/bridges/snowbridge/primitives/ethereum/src/mpt.rs b/bridges/snowbridge/primitives/ethereum/src/mpt.rs index 9a2dae486dcc..0365f5e994fe 100644 --- a/bridges/snowbridge/primitives/ethereum/src/mpt.rs +++ b/bridges/snowbridge/primitives/ethereum/src/mpt.rs @@ -3,7 +3,7 @@ //! 
Helper types to work with Ethereum's Merkle Patricia Trie nodes use ethereum_types::H256; -use sp_std::{convert::TryFrom, prelude::*}; +use sp_std::prelude::*; pub trait Node { fn contains_hash(&self, hash: H256) -> bool; diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 92970339fac0..7cbb38574034 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -3,7 +3,7 @@ name = "snowbridge-runtime-test-common" description = "Snowbridge Runtime Tests" version = "0.2.0" authors = ["Snowfork "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" categories = ["cryptography::cryptocurrencies"] diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 5b7669c88f47..776052215d93 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -55,7 +55,7 @@ use sp_runtime::{ }; use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, time::Duration}; +use std::{error::Error, time::Duration}; /// Parameters for instantiating a [`Collator`]. pub struct Params { diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index a4c22a45266c..1047c6219ad1 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -48,7 +48,7 @@ use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_state_machine::Backend as _; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator as collator_util; diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 3fe87e94b7b9..09416233ea9b 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -67,7 +67,7 @@ use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator::{self as collator_util, SlotClaim}; diff --git a/cumulus/client/network/src/lib.rs b/cumulus/client/network/src/lib.rs index ebd557b805c5..f442ed5840bd 100644 --- a/cumulus/client/network/src/lib.rs +++ b/cumulus/client/network/src/lib.rs @@ -36,7 +36,7 @@ use polkadot_primitives::{ use codec::{Decode, DecodeAll, Encode}; use futures::{channel::oneshot, future::FutureExt, Future}; -use std::{convert::TryFrom, fmt, marker::PhantomData, pin::Pin, sync::Arc}; +use std::{fmt, marker::PhantomData, pin::Pin, sync::Arc}; #[cfg(test)] mod tests; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index b3fac47cb4ae..207259bee52c 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-collective-content" version = "0.6.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true description = "Managed content" license = "Apache-2.0" diff --git 
a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs index 0fc0bb3d2788..89851c4a033b 100644 --- a/polkadot/node/core/bitfield-signing/src/lib.rs +++ b/polkadot/node/core/bitfield-signing/src/lib.rs @@ -38,7 +38,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{self as util, Validator}; use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; use sp_keystore::{Error as KeystoreError, KeystorePtr}; -use std::{collections::HashMap, iter::FromIterator, time::Duration}; +use std::{collections::HashMap, time::Duration}; use wasm_timer::{Delay, Instant}; mod metrics; diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index 188b51ebccca..dc37f73ec8a1 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -40,7 +40,7 @@ use sp_core::Pair as PairT; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; -use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; const TIMEOUT: Duration = Duration::from_millis(50); macro_rules! launch { diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index e6aa55235b7a..879caf923285 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -16,7 +16,6 @@ use std::{ collections::{HashMap, HashSet}, - convert::TryInto, time::Duration, }; diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index f7b07133bff4..ac8c060827f5 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -20,9 +20,7 @@ use futures::{ use futures_timer::Delay; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - convert::TryInto, future::Future, - iter::FromIterator, time::{Duration, Instant}, }; use tokio_util::sync::CancellationToken; diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 0dea5ad0996e..d4c5f95034ae 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -55,7 +55,7 @@ use sp_application_crypto::{sr25519::Pair, AppCrypto, Pair as TraitPair}; use sp_authority_discovery::AuthorityPair; use sp_keyring::Sr25519Keyring; use sp_keystore::{Keystore, KeystorePtr}; -use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use util::reputation::add_reputation; // Some deterministic genesis hash for protocol names diff --git a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs index fa2c7687b38a..29798c785b9c 100644 --- a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use std::{collections::HashSet, convert::TryFrom}; +use std::collections::HashSet; pub use sc_network::ReputationChange; pub use sc_network_types::PeerId; diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index e9e51941b1ac..32ce352c5c02 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -26,7 +26,6 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use core::convert::{TryFrom, TryInto}; use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v3/junctions.rs b/polkadot/xcm/src/v3/junctions.rs index 9748e81fa55f..7b014304fdaf 100644 --- a/polkadot/xcm/src/v3/junctions.rs +++ b/polkadot/xcm/src/v3/junctions.rs @@ -17,7 +17,7 @@ //! XCM `Junctions`/`InteriorMultiLocation` datatype. use super::{Junction, MultiLocation, NetworkId}; -use core::{convert::TryFrom, mem, result}; +use core::{mem, result}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index d4e2da07a25a..e7c57f414eb7 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -29,11 +29,7 @@ use super::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - result, -}; +use core::{fmt::Debug, result}; use derivative::Derivative; use parity_scale_codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 0662077b19d0..9a67b0e4986c 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -42,10 +42,7 @@ use crate::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; -use core::{ - cmp::Ordering, - convert::{TryFrom, TryInto}, -}; +use core::cmp::Ordering; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 18fe01ec8fa7..731e277b29d8 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -20,10 +20,7 @@ use super::{Junction, Junctions}; use crate::{ v2::MultiLocation as OldMultiLocation, v4::Location as NewMultiLocation, VersionedLocation, }; -use core::{ - convert::{TryFrom, TryInto}, - result, -}; +use core::result; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -766,7 +763,6 @@ mod tests { #[test] fn conversion_from_other_types_works() { use crate::v2; - use core::convert::TryInto; fn takes_multilocation>(_arg: Arg) {} diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index 8abd8f9f8fd0..6b6d200f32fe 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -34,10 +34,7 @@ use crate::v3::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; -use core::{ - cmp::Ordering, - convert::{TryFrom, TryInto}, -}; +use core::cmp::Ordering; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v4/junction.rs b/polkadot/xcm/src/v4/junction.rs index b5d10484aa02..3ae97de5e9b8 100644 --- a/polkadot/xcm/src/v4/junction.rs +++ 
b/polkadot/xcm/src/v4/junction.rs @@ -23,7 +23,6 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use core::convert::TryFrom; use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v4/junctions.rs b/polkadot/xcm/src/v4/junctions.rs index 48712dd74c6c..6d1af59e13dc 100644 --- a/polkadot/xcm/src/v4/junctions.rs +++ b/polkadot/xcm/src/v4/junctions.rs @@ -18,7 +18,7 @@ use super::{Junction, Location, NetworkId}; use alloc::sync::Arc; -use core::{convert::TryFrom, mem, ops::Range, result}; +use core::{mem, ops::Range, result}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index 9275bfdb9492..cee76b689407 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -18,10 +18,7 @@ use super::{traits::Reanchorable, Junction, Junctions}; use crate::{v3::MultiLocation as OldLocation, VersionedLocation}; -use core::{ - convert::{TryFrom, TryInto}, - result, -}; +use core::result; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -723,7 +720,6 @@ mod tests { #[test] fn conversion_from_other_types_works() { use crate::v3; - use core::convert::TryInto; fn takes_location>(_arg: Arg) {} diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 30ee485589a2..77b6d915fcb5 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -24,11 +24,7 @@ use super::v3::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - result, -}; +use core::{fmt::Debug, result}; use derivative::Derivative; use parity_scale_codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index 63d254a10675..16ce3d2cf8ff 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -15,7 +15,6 @@ // along with Polkadot. If not, see . 
use super::{test_utils::*, *}; -use core::convert::TryInto; use frame_support::{ assert_err, traits::{ConstU32, ContainsPair, ProcessMessageError}, diff --git a/substrate/client/consensus/grandpa/rpc/src/lib.rs b/substrate/client/consensus/grandpa/rpc/src/lib.rs index 0557eab93e29..68de068c3058 100644 --- a/substrate/client/consensus/grandpa/rpc/src/lib.rs +++ b/substrate/client/consensus/grandpa/rpc/src/lib.rs @@ -125,7 +125,7 @@ where #[cfg(test)] mod tests { use super::*; - use std::{collections::HashSet, convert::TryInto, sync::Arc}; + use std::{collections::HashSet, sync::Arc}; use jsonrpsee::{core::EmptyServerParams as EmptyParams, types::SubscriptionId, RpcModule}; use parity_scale_codec::{Decode, Encode}; diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs index d3e2beb84e79..31df038044a4 100644 --- a/substrate/client/consensus/grandpa/src/environment.rs +++ b/substrate/client/consensus/grandpa/src/environment.rs @@ -18,7 +18,6 @@ use std::{ collections::{BTreeMap, HashMap}, - iter::FromIterator, marker::PhantomData, pin::Pin, sync::Arc, diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 65b81bda4b08..e605a06c9d9c 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-mixnet" version = "0.4.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs index 791821b3f75d..33c090ae50e9 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs @@ -19,7 +19,6 @@ use futures::prelude::*; use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; use std::{ - iter::FromIterator, pin::Pin, task::{Context, Poll}, vec, diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 84bab86581ca..ef8d8758f3df 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -2,7 +2,7 @@ name = "polkadot-sdk-frame" version = "0.1.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "paritytech.github.io" repository.workspace = true diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 710c32a848dd..09e2045555b6 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -19,11 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use core::{ - cmp, - convert::{TryFrom, TryInto}, - mem::size_of, -}; +use core::{cmp, mem::size_of}; use sp_runtime::traits::{Bounded, Hash, StaticLookup}; use frame_benchmarking::{account, v2::*, BenchmarkError}; diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index 1f06241e9c83..ed771c7226ea 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -101,7 +101,7 @@ use sp_runtime::{ traits::{Dispatchable, Saturating, StaticLookup, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; use frame_support::{ dispatch::{DispatchResult, 
DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index b183e412bed7..7116e69efa17 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -17,7 +17,6 @@ //! Test utilities -use core::convert::{TryFrom, TryInto}; pub use sp_core::H256; use sp_runtime::traits::Hash; pub use sp_runtime::{ diff --git a/substrate/frame/alliance/src/types.rs b/substrate/frame/alliance/src/types.rs index 784993b2bc13..149030b52c67 100644 --- a/substrate/frame/alliance/src/types.rs +++ b/substrate/frame/alliance/src/types.rs @@ -19,7 +19,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{traits::ConstU32, BoundedVec}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; /// A Multihash instance that only supports the basic functionality and no hashing. #[derive( diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 3a0e4f720f95..48cb25f90949 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-example-frame-crate" version = "0.0.1" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 6a4ef5c29ac8..964d6acb889a 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "pallet-mixnet" version = "0.4.0" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index 9019a863ad81..a7967536079f 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -47,7 +47,7 @@ pub mod weights; pub use pallet::*; use sp_core::OpaquePeerId as PeerId; use sp_runtime::traits::StaticLookup; -use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; diff --git a/substrate/frame/safe-mode/src/lib.rs b/substrate/frame/safe-mode/src/lib.rs index 2bf2ebee0a4a..4be0776d6c1f 100644 --- a/substrate/frame/safe-mode/src/lib.rs +++ b/substrate/frame/safe-mode/src/lib.rs @@ -79,7 +79,6 @@ pub mod mock; mod tests; pub mod weights; -use core::convert::TryInto; use frame_support::{ defensive_assert, pallet_prelude::*, diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 09977142efc8..888b1d8f31fc 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-sassafras" version = "0.3.5-dev" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/substrate/frame/transaction-payment/rpc/src/lib.rs b/substrate/frame/transaction-payment/rpc/src/lib.rs index f5323cf852e9..050c7fb8915e 100644 --- 
a/substrate/frame/transaction-payment/rpc/src/lib.rs +++ b/substrate/frame/transaction-payment/rpc/src/lib.rs @@ -17,7 +17,7 @@ //! RPC interface for the transaction payment pallet. -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; use codec::{Codec, Decode}; use jsonrpsee::{ diff --git a/substrate/frame/tx-pause/src/lib.rs b/substrate/frame/tx-pause/src/lib.rs index 31be575fba7c..5904b5ed3162 100644 --- a/substrate/frame/tx-pause/src/lib.rs +++ b/substrate/frame/tx-pause/src/lib.rs @@ -87,7 +87,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use sp_runtime::{traits::Dispatchable, DispatchResult}; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; pub use pallet::*; pub use weights::*; diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 07304ed9b240..50348054da01 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-sassafras" version = "0.3.4-dev" authors.workspace = true description = "Primitives for Sassafras consensus" -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/polkadot-sdk/" diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml index c6b5a065b6dc..463eaea8ea30 100644 --- a/substrate/primitives/core/fuzz/Cargo.toml +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -2,6 +2,7 @@ name = "sp-core-fuzz" version = "0.0.0" publish = false +edition.workspace = true [lints] workspace = true diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 07840ca63cb2..166609ad922c 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "sp-mixnet" version = "0.4.0" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index ace88aee2628..8b6f746eaba0 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -33,7 +33,6 @@ use sp_trie::{empty_child_trie_root, LayoutV0, LayoutV1, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, - iter::FromIterator, }; /// Simple Map-based Externalities impl. From 92a348f57deed44789511df73d3fbbbcb58d98cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 28 Apr 2024 20:36:25 +0400 Subject: [PATCH 20/27] Bump snow from 0.9.3 to 0.9.6 (#4061) Bumps [snow](https://github.com/mcginty/snow) from 0.9.3 to 0.9.6.

Release notes

Sourced from snow's releases.

v0.9.6

  • Validate invalid PSK positions when building a Noise protocol.
  • Raise errors on various typos/mistakes in Noise patterns when parsing.
  • Deprecate the sodiumoxide backend, as that crate is no longer maintained. We may eventually migrate it to a maintained version of the crate, but for now it's best to warn users.
  • Set a hard limit in read_message() in transport mode to 65535 to be fully compliant with the Noise specification.

Full Changelog: https://github.com/mcginty/snow/compare/v0.9.5...v0.9.6

v0.9.5

This is a security release that fixes a logic flaw in decryption in TransportState (i.e. the stateful one), where the nonce could increase even when decryption failed. This can desynchronize the sender and receiver, opening up a denial-of-service vector if the attacker has the ability to inject packets into the channel Noise is talking over.

More details can be found in the advisory: https://github.com/mcginty/snow/security/advisories/GHSA-7g9j-g5jg-3vv3

All users are encouraged to update.

v0.9.4

This is a dependency version bump release because a couple of important dependencies released new versions that needed a Cargo.toml bump:

  • ring 0.17
  • pqcrypto-kyber 0.8
  • aes-gcm 0.10
  • chacha20poly1305 0.10
Commits
  • a4be73f meta: v0.9.6 release
  • 9e53dcf TransportState: limit read_message size to 65535
  • faf0560 Deprecate sodiumoxide resolver
  • 308a24d Add warnings about multiple calls to same method in Builder
  • f280991 Error when extraneous parameters are included in string to parse
  • dbdcc48 Error on duplicate modifiers in parameter string
  • 8b1a819 Validate PSK index in pattern to avoid panic
  • 74e30cf meta: v0.9.5 release
  • 12e8ae5 Stateful nonce desync fix
  • 02c26b7 Remove clap from simple example
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=snow&package-manager=cargo&previous-version=0.9.3&new-version=0.9.6)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/paritytech/polkadot-sdk/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 153 ++++++++--------------------------------------------- 1 file changed, 23 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d64800fb085e..67b0ad4def24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,15 +42,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "aead" version = "0.5.2" @@ -61,18 +52,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -84,31 +63,17 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "aes-gcm" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.4", - "subtle 2.5.0", -] - [[package]] name = "aes-gcm" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", - "aes 0.8.3", + "aead", + "aes", "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "ctr", + "ghash", "subtle 2.5.0", ] @@ -2540,18 +2505,6 @@ dependencies = [ "keystream", ] -[[package]] -name = "chacha20" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "zeroize", -] - [[package]] name = "chacha20" version = "0.9.1" @@ -2565,14 +2518,14 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.4.3", - "chacha20 0.8.2", - "cipher 0.3.0", - "poly1305 0.7.2", + "aead", + "chacha20", + "cipher 0.4.4", + "poly1305", "zeroize", ] @@ -2652,15 +2605,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "cipher" version = "0.4.4" @@ -2669,6 +2613,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -3676,15 +3621,6 @@ dependencies = [ "subtle 2.5.0", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.9.2" @@ -6395,16 +6331,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.5.3", -] - [[package]] name = "ghash" version = "0.5.0" @@ -6412,7 +6338,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug 0.3.0", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -12032,7 +11958,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", "rand 0.8.5", - "rand_core 0.6.4", + "rand_core 0.5.1", "serde", "unicode-normalization", ] @@ -14568,17 +14494,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "poly1305" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" -dependencies = [ - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", -] - [[package]] name = "poly1305" version = "0.8.0" @@ -14587,19 +14502,7 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", -] - -[[package]] -name = "polyval" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", + "universal-hash", ] [[package]] @@ -14611,7 +14514,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -17890,7 +17793,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ - "aead 0.5.2", + "aead", "arrayref", "arrayvec 0.7.4", "curve25519-dalek 4.1.2", @@ -18542,7 +18445,7 @@ dependencies = [ "bip39", "blake2-rfc", "bs58 0.5.0", - "chacha20 0.9.1", + "chacha20", "crossbeam-queue", "derive_more", "ed25519-zebra 4.0.3", @@ -18564,7 +18467,7 @@ dependencies = [ "num-traits", "pbkdf2", "pin-project", - "poly1305 0.8.0", + "poly1305", "rand 0.8.5", "rand_chacha 0.3.1", "ruzstd", @@ -18627,16 +18530,16 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.9.2", + "aes-gcm", "blake2 0.10.6", "chacha20poly1305", "curve25519-dalek 4.1.2", "rand_core 0.6.4", - "ring 0.16.20", + "ring 0.17.7", "rustc_version 0.4.0", "sha2 0.10.7", "subtle 2.5.0", @@ -19900,7 +19803,7 @@ dependencies = [ name = "sp-statement-store" version = "10.0.0" dependencies = [ - "aes-gcm 0.10.3", + "aes-gcm", "curve25519-dalek 4.1.2", "ed25519-dalek 2.1.0", "hkdf", @@ -22142,16 +22045,6 @@ version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.5.0", -] - [[package]] name = "universal-hash" version = "0.5.1" From f34d8e3cf033e2a22a41b505c437972a5dc83d78 Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:13:01 +0700 Subject: [PATCH 21/27] Remove hard-coded indices from pallet-xcm tests (#4248) # ISSUE - Link to issue: https://github.com/paritytech/polkadot-sdk/issues/4237 # DESCRIPTION Remove all ModuleError with hard-coded indices to pallet Error. For example: ```rs Err(DispatchError::Module(ModuleError { index: 4, error: [2, 0, 0, 0], message: Some("Filtered") })) ``` To ```rs let expected_result = Err(crate::Error::::Filtered.into()); assert_eq!(result, expected_result); ``` # TEST OUTCOME ``` test result: ok. 74 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.02s ``` --------- Co-authored-by: Oliver Tale-Yazdi --- .../pallet-xcm/src/tests/assets_transfer.rs | 218 ++++-------------- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 6 +- 2 files changed, 40 insertions(+), 184 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 7dc05c1cc70e..f42e220d6932 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -22,12 +22,12 @@ use crate::{ DispatchResult, OriginFor, }; use frame_support::{ - assert_ok, + assert_err, assert_ok, traits::{tokens::fungibles::Inspect, Currency}, weights::Weight, }; use polkadot_parachain_primitives::primitives::Id as ParaId; -use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; +use sp_runtime::traits::AccountIdConversion; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -112,14 +112,8 @@ fn limited_teleport_filtered_assets_disallowed() { 0, Unlimited, ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); + let expected_result = Err(crate::Error::::Filtered.into()); + assert_eq!(result, expected_result); }); } @@ -365,11 +359,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works( /// Test `limited_teleport_assets` with local asset reserve and local fee reserve disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); local_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -527,11 +517,7 @@ fn transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() /// disallowed. 
#[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); destination_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -542,11 +528,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_ /// disallowed. #[test] fn teleport_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); destination_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -633,11 +615,7 @@ fn remote_asset_reserve_and_local_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and local fee reserve is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -648,11 +626,7 @@ fn transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -662,11 +636,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disal /// Test `limited_teleport_assets` with remote asset reserve and local fee reserve is disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -745,7 +715,7 @@ fn local_asset_reserve_and_destination_fee_reserve_call( assert_eq!(result, expected_result); if expected_result.is_err() { // short-circuit here for tests where we expect failure - return + return; } let weight = BaseXcmWeight::get() * 3; @@ -821,11 +791,7 @@ fn transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() /// disallowed. 
#[test] fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); local_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -835,11 +801,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_ /// Test `limited_teleport_assets` with local asset reserve and destination fee reserve disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); local_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -993,11 +955,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_re /// disallowed. #[test] fn teleport_assets_with_destination_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); destination_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -1102,11 +1060,7 @@ fn remote_asset_reserve_and_destination_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and destination fee reserve is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1117,11 +1071,7 @@ fn transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallo /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1132,11 +1082,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve /// disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1222,11 +1168,7 @@ fn local_asset_reserve_and_remote_fee_reserve_call_disallowed( /// Test `transfer_assets` with local asset reserve and remote fee reserve is disallowed. 
#[test] fn transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::::InvalidAssetUnsupportedReserve.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1237,11 +1179,7 @@ fn transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() /// disallowed. #[test] fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1251,11 +1189,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disal /// Test `limited_teleport_assets` with local asset reserve and remote fee reserve is disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1366,11 +1300,7 @@ fn destination_asset_reserve_and_remote_fee_reserve_call_disallowed( /// Test `transfer_assets` with destination asset reserve and remote fee reserve is disallowed. #[test] fn transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::::InvalidAssetUnsupportedReserve.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1381,11 +1311,7 @@ fn transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallo /// disallowed. #[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1396,11 +1322,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve /// disallowed. 
#[test] fn teleport_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1485,7 +1407,7 @@ fn remote_asset_reserve_and_remote_fee_reserve_call( assert_eq!(result, expected_result); if expected_result.is_err() { // short-circuit here for tests where we expect failure - return + return; } assert!(matches!( @@ -1558,11 +1480,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_work /// disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); remote_asset_reserve_and_remote_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -1702,11 +1620,7 @@ fn transfer_assets_with_local_asset_reserve_and_teleported_fee_works() { /// Test `limited_reserve_transfer_assets` with local asset reserve and teleported fee disallowed. #[test] fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); local_asset_reserve_and_teleported_fee_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1716,11 +1630,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_disallowe /// Test `limited_teleport_assets` with local asset reserve and teleported fee disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); local_asset_reserve_and_teleported_fee_call( XcmPallet::limited_teleport_assets, expected_result, @@ -1802,7 +1712,7 @@ fn destination_asset_reserve_and_teleported_fee_call( assert_eq!(result, expected_result); if expected_result.is_err() { // short-circuit here for tests where we expect failure - return + return; } let weight = BaseXcmWeight::get() * 4; @@ -1891,11 +1801,7 @@ fn transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() { /// disallowed. #[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); destination_asset_reserve_and_teleported_fee_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1905,11 +1811,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_dis /// Test `limited_teleport_assets` with destination asset reserve and teleported fee disallowed. 
#[test] fn teleport_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); destination_asset_reserve_and_teleported_fee_call( XcmPallet::limited_teleport_assets, expected_result, @@ -2013,11 +1915,7 @@ fn remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and teleported fee is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -2028,11 +1926,7 @@ fn transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -2042,11 +1936,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallow /// Test `limited_teleport_assets` with remote asset reserve and teleported fee is disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -2088,14 +1978,7 @@ fn reserve_transfer_assets_with_teleportable_asset_disallowed() { fee_index as u32, Unlimited, ); - assert_eq!( - res, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); + assert_err!(res, crate::Error::::Filtered); // Alice native asset is still same assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); // Alice USDT balance is still same @@ -2136,14 +2019,7 @@ fn transfer_assets_with_filtered_teleported_fee_disallowed() { fee_index as u32, Unlimited, ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); + assert_err!(result, crate::Error::::Filtered); }); } @@ -2350,11 +2226,7 @@ fn transfer_assets_with_teleportable_asset_and_local_fee_reserve_works() { /// Test `limited_reserve_transfer_assets` with teleportable asset and local fee reserve disallowed. 
#[test] fn reserve_transfer_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleport_asset_using_local_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -2364,11 +2236,7 @@ fn reserve_transfer_assets_with_teleportable_asset_and_local_fee_reserve_disallo /// Test `limited_teleport_assets` with teleportable asset and local fee reserve disallowed. #[test] fn teleport_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleport_asset_using_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -2541,11 +2409,7 @@ fn transfer_teleported_assets_using_destination_reserve_fee_works() { /// disallowed. #[test] fn reserve_transfer_teleported_assets_using_destination_reserve_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleported_asset_using_destination_reserve_fee_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -2555,11 +2419,7 @@ fn reserve_transfer_teleported_assets_using_destination_reserve_fee_disallowed() /// Test `limited_teleport_assets` with teleported asset reserve and destination fee disallowed. #[test] fn teleport_assets_using_destination_reserve_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleported_asset_using_destination_reserve_fee_call( XcmPallet::limited_teleport_assets, expected_result, diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 8faf16e0d2a9..782c8bed478e 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -557,11 +557,7 @@ fn incomplete_execute_reverts_side_effects() { ), pays_fee: frame_support::dispatch::Pays::Yes, }, - error: sp_runtime::DispatchError::Module(sp_runtime::ModuleError { - index: 4, - error: [24, 0, 0, 0,], - message: Some("LocalExecutionIncomplete") - }) + error: sp_runtime::DispatchError::from(Error::::LocalExecutionIncomplete) }) ); }); From 0031d49d1ec083c62a4e2b5bf594b7f45f84ab0d Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Mon, 29 Apr 2024 17:55:45 +0200 Subject: [PATCH 22/27] [Staking] Not allow reap stash for virtual stakers (#4311) Related to https://github.com/paritytech/polkadot-sdk/pull/3905. Since virtual stakers does not have a real balance, they should not be allowed to be reaped. The proper reaping for agents slashed will be done in a separate PR. 
--- prdoc/pr_4311.prdoc | 9 ++ substrate/frame/staking/src/pallet/impls.rs | 4 +- substrate/frame/staking/src/pallet/mod.rs | 8 ++ substrate/frame/staking/src/tests.rs | 93 +++++++++++++++++++++ 4 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 prdoc/pr_4311.prdoc diff --git a/prdoc/pr_4311.prdoc b/prdoc/pr_4311.prdoc new file mode 100644 index 000000000000..cf32acaf0089 --- /dev/null +++ b/prdoc/pr_4311.prdoc @@ -0,0 +1,9 @@ +title: Not allow reap stash for virtual stakers. + +doc: + - audience: Runtime Dev + description: | + Add guards to staking dispathables to prevent virtual stakers to be reaped. + +crates: +- name: pallet-staking diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 4eb24311ab34..5b2a55303e2c 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -87,10 +87,12 @@ impl Pallet { StakingLedger::::paired_account(Stash(stash.clone())) } - /// Inspects and returns the corruption state of a ledger and bond, if any. + /// Inspects and returns the corruption state of a ledger and direct bond, if any. /// /// Note: all operations in this method access directly the `Bonded` and `Ledger` storage maps /// instead of using the [`StakingLedger`] API since the bond and/or ledger may be corrupted. + /// It is also meant to check state for direct bonds and may not work as expected for virtual + /// bonds. pub(crate) fn inspect_bond_state( stash: &T::AccountId, ) -> Result> { diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 9c968d883444..16ad510c562b 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -868,6 +868,8 @@ pub mod pallet { RewardDestinationRestricted, /// Not enough funds available to withdraw. NotEnoughFunds, + /// Operation not allowed for virtual stakers. + VirtualStakerNotAllowed, } #[pallet::hooks] @@ -1634,6 +1636,9 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let _ = ensure_signed(origin)?; + // virtual stakers should not be allowed to be reaped. + ensure!(!Self::is_virtual_staker(&stash), Error::::VirtualStakerNotAllowed); + let ed = T::Currency::minimum_balance(); let reapable = T::Currency::total_balance(&stash) < ed || Self::ledger(Stash(stash.clone())).map(|l| l.total).unwrap_or_default() < ed; @@ -1994,6 +1999,9 @@ pub mod pallet { ) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; + // cannot restore ledger for virtual stakers. + ensure!(!Self::is_virtual_staker(&stash), Error::::VirtualStakerNotAllowed); + let current_lock = T::Currency::balance_locked(crate::STAKING_ID, &stash); let stash_balance = T::Currency::free_balance(&stash); diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index d05752f54be7..3cb51604aa6b 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -7205,6 +7205,99 @@ mod staking_unchecked { assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); }) } + + #[test] + fn virtual_stakers_cannot_be_reaped() { + ExtBuilder::default() + // we need enough validators such that disables are allowed. + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + // make 101 only nominate 11. 
+ assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![11])); + + mock::start_active_era(1); + + // slash all stake. + let slash_percent = Perbill::from_percent(100); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + ::migrate_to_virtual_staker(&101); + // set payee different to self. + assert_ok!(::update_payee(&101, &102)); + + // cache values + let validator_balance = Balances::free_balance(&11); + let validator_stake = Staking::ledger(11.into()).unwrap().total; + let nominator_balance = Balances::free_balance(&101); + let nominator_stake = Staking::ledger(101.into()).unwrap().total; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { + offender: (11, initial_exposure.clone()), + reporters: vec![], + }], + &[slash_percent], + ); + + // both stakes must have been decreased to 0. + assert_eq!(Staking::ledger(101.into()).unwrap().active, 0); + assert_eq!(Staking::ledger(11.into()).unwrap().active, 0); + + // all validator stake is slashed + assert_eq_error_rate!( + validator_balance - validator_stake, + Balances::free_balance(&11), + 1 + ); + // Because slashing happened. + assert!(is_disabled(11)); + + // Virtual nominator's balance is not slashed. + assert_eq!(Balances::free_balance(&101), nominator_balance); + // Slash is broadcasted to slash observers. + assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_stake); + + // validator can be reaped. + assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(10), 11, u32::MAX)); + // nominator is a virtual staker and cannot be reaped. + assert_noop!( + Staking::reap_stash(RuntimeOrigin::signed(10), 101, u32::MAX), + Error::::VirtualStakerNotAllowed + ); + }) + } + + #[test] + fn restore_ledger_not_allowed_for_virtual_stakers() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + // 333 is corrupted + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // migrate to virtual staker. + ::migrate_to_virtual_staker(&333); + + // recover the ledger won't work for virtual staker + assert_noop!( + Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None), + Error::::VirtualStakerNotAllowed + ); + + // migrate 333 back to normal staker + >::remove(333); + + // try restore again + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + }) + } } mod ledger { use super::*; From 4875ea11aeef4f3fc7d724940e5ffb703830619b Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 29 Apr 2024 17:22:23 -0400 Subject: [PATCH 23/27] Refactor XCM Simulator Example (#4220) This PR does a "developer experience" refactor of the XCM Simulator Example. I was looking for existing code / documentation where developers could better learn about working with and configuring XCM. The XCM Simulator was a natural starting point due to the fact that it can emulate end to end XCM scenarios, without needing to spawn multiple real chains. However, the XCM Simulator Example was just 3 giant files with a ton of configurations, runtime, pallets, and tests mashed together. 
This PR breaks down the XCM Simulator Example in a way that I believe is more approachable by a new developer who is looking to navigate the various components of the end to end example, and modify it themselves. The basic structure is: - xcm simulator example - lib (tries to only use the xcm simulator macros) - tests - relay-chain - mod (basic runtime that developers should be familiar with) - xcm-config - mod (contains the `XcmConfig` type - various files for each custom configuration - parachain - mock_msg_queue (custom pallet for simulator example) - mod (basic runtime that developers should be familiar with) - xcm-config - mod (contains the `XcmConfig` type - various files for each custom configuration I would like to add more documentation to this too, but I think this is a first step to be accepted which will affect how documentation is added to the example --------- Co-authored-by: Francisco Aguirre Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- polkadot/xcm/xcm-simulator/example/src/lib.rs | 529 +----------------- .../xcm-simulator/example/src/parachain.rs | 470 ---------------- .../example/src/parachain/mock_msg_queue.rs | 185 ++++++ .../example/src/parachain/mod.rs | 182 ++++++ .../parachain/xcm_config/asset_transactor.rs | 39 ++ .../src/parachain/xcm_config/barrier.rs | 20 + .../src/parachain/xcm_config/constants.rs | 30 + .../xcm_config/location_converter.rs | 25 + .../example/src/parachain/xcm_config/mod.rs | 63 +++ .../parachain/xcm_config/origin_converter.rs | 29 + .../src/parachain/xcm_config/reserve.rs | 21 + .../src/parachain/xcm_config/teleporter.rs | 27 + .../src/parachain/xcm_config/weigher.rs | 27 + .../{relay_chain.rs => relay_chain/mod.rs} | 145 +---- .../xcm_config/asset_transactor.rs | 38 ++ .../src/relay_chain/xcm_config/barrier.rs | 20 + .../src/relay_chain/xcm_config/constants.rs | 31 + .../xcm_config/location_converter.rs | 25 + .../example/src/relay_chain/xcm_config/mod.rs | 62 ++ .../xcm_config/origin_converter.rs | 34 ++ .../src/relay_chain/xcm_config/weigher.rs | 27 + .../xcm/xcm-simulator/example/src/tests.rs | 513 +++++++++++++++++ prdoc/pr_4220.prdoc | 11 + 23 files changed, 1435 insertions(+), 1118 deletions(-) delete mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs rename polkadot/xcm/xcm-simulator/example/src/{relay_chain.rs => relay_chain/mod.rs} (53%) create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs create mode 100644 
polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/tests.rs create mode 100644 prdoc/pr_4220.prdoc diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs index 56e204bf5718..6fb9a69770ea 100644 --- a/polkadot/xcm/xcm-simulator/example/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs @@ -17,13 +17,16 @@ mod parachain; mod relay_chain; +#[cfg(test)] +mod tests; + use sp_runtime::BuildStorage; use sp_tracing; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; -pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); +pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]); pub const INITIAL_BALANCE: u128 = 1_000_000_000; decl_test_parachain! { @@ -68,27 +71,27 @@ decl_test_network! { pub fn parent_account_id() -> parachain::AccountId { let location = (Parent,); - parachain::LocationToAccountId::convert_location(&location.into()).unwrap() + parachain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn child_account_id(para: u32) -> relay_chain::AccountId { let location = (Parachain(para),); - relay_chain::LocationToAccountId::convert_location(&location.into()).unwrap() + relay_chain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn child_account_account_id(para: u32, who: sp_runtime::AccountId32) -> relay_chain::AccountId { let location = (Parachain(para), AccountId32 { network: None, id: who.into() }); - relay_chain::LocationToAccountId::convert_location(&location.into()).unwrap() + relay_chain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn sibling_account_account_id(para: u32, who: sp_runtime::AccountId32) -> parachain::AccountId { let location = (Parent, Parachain(para), AccountId32 { network: None, id: who.into() }); - parachain::LocationToAccountId::convert_location(&location.into()).unwrap() + parachain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn parent_account_account_id(who: sp_runtime::AccountId32) -> parachain::AccountId { let location = (Parent, AccountId32 { network: None, id: who.into() }); - parachain::LocationToAccountId::convert_location(&location.into()).unwrap() + parachain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { @@ -137,517 +140,3 @@ pub fn relay_ext() -> sp_io::TestExternalities { pub type RelayChainPalletXcm = pallet_xcm::Pallet; pub type ParachainPalletXcm = pallet_xcm::Pallet; - -#[cfg(test)] -mod tests { - use super::*; - - use codec::Encode; - use frame_support::{assert_ok, weights::Weight}; - use xcm::latest::QueryResponseInfo; - use xcm_simulator::TestExt; - - // Helper function for forming buy execution message - fn 
buy_execution(fees: impl Into) -> Instruction { - BuyExecution { fees: fees.into(), weight_limit: Unlimited } - } - - #[test] - fn remote_account_ids_work() { - child_account_account_id(1, ALICE); - sibling_account_account_id(1, ALICE); - parent_account_account_id(ALICE); - } - - #[test] - fn dmp() { - MockNet::reset(); - - let remark = parachain::RuntimeCall::System( - frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, - ); - Relay::execute_with(|| { - assert_ok!(RelayChainPalletXcm::send_xcm( - Here, - Parachain(1), - Xcm(vec![Transact { - origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), - call: remark.encode().into(), - }]), - )); - }); - - ParaA::execute_with(|| { - use parachain::{RuntimeEvent, System}; - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::System(frame_system::Event::Remarked { .. }) - ))); - }); - } - - #[test] - fn ump() { - MockNet::reset(); - - let remark = relay_chain::RuntimeCall::System( - frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, - ); - ParaA::execute_with(|| { - assert_ok!(ParachainPalletXcm::send_xcm( - Here, - Parent, - Xcm(vec![Transact { - origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), - call: remark.encode().into(), - }]), - )); - }); - - Relay::execute_with(|| { - use relay_chain::{RuntimeEvent, System}; - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::System(frame_system::Event::Remarked { .. }) - ))); - }); - } - - #[test] - fn xcmp() { - MockNet::reset(); - - let remark = parachain::RuntimeCall::System( - frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, - ); - ParaA::execute_with(|| { - assert_ok!(ParachainPalletXcm::send_xcm( - Here, - (Parent, Parachain(2)), - Xcm(vec![Transact { - origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), - call: remark.encode().into(), - }]), - )); - }); - - ParaB::execute_with(|| { - use parachain::{RuntimeEvent, System}; - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::System(frame_system::Event::Remarked { .. 
}) - ))); - }); - } - - #[test] - fn reserve_transfer() { - MockNet::reset(); - - let withdraw_amount = 123; - - Relay::execute_with(|| { - assert_ok!(RelayChainPalletXcm::limited_reserve_transfer_assets( - relay_chain::RuntimeOrigin::signed(ALICE), - Box::new(Parachain(1).into()), - Box::new(AccountId32 { network: None, id: ALICE.into() }.into()), - Box::new((Here, withdraw_amount).into()), - 0, - Unlimited, - )); - assert_eq!( - relay_chain::Balances::free_balance(&child_account_id(1)), - INITIAL_BALANCE + withdraw_amount - ); - }); - - ParaA::execute_with(|| { - // free execution, full amount received - assert_eq!( - pallet_balances::Pallet::::free_balance(&ALICE), - INITIAL_BALANCE + withdraw_amount - ); - }); - } - - #[test] - fn remote_locking_and_unlocking() { - MockNet::reset(); - - let locked_amount = 100; - - ParaB::execute_with(|| { - let message = Xcm(vec![LockAsset { - asset: (Here, locked_amount).into(), - unlocker: Parachain(1).into(), - }]); - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); - }); - - Relay::execute_with(|| { - use pallet_balances::{BalanceLock, Reasons}; - assert_eq!( - relay_chain::Balances::locks(&child_account_id(2)), - vec![BalanceLock { - id: *b"py/xcmlk", - amount: locked_amount, - reasons: Reasons::All - }] - ); - }); - - ParaA::execute_with(|| { - assert_eq!( - parachain::MsgQueue::received_dmp(), - vec![Xcm(vec![NoteUnlockable { - owner: (Parent, Parachain(2)).into(), - asset: (Parent, locked_amount).into() - }])] - ); - }); - - ParaB::execute_with(|| { - // Request unlocking part of the funds on the relay chain - let message = Xcm(vec![RequestUnlock { - asset: (Parent, locked_amount - 50).into(), - locker: Parent.into(), - }]); - assert_ok!(ParachainPalletXcm::send_xcm(Here, (Parent, Parachain(1)), message)); - }); - - Relay::execute_with(|| { - use pallet_balances::{BalanceLock, Reasons}; - // Lock is reduced - assert_eq!( - relay_chain::Balances::locks(&child_account_id(2)), - vec![BalanceLock { - id: *b"py/xcmlk", - amount: locked_amount - 50, - reasons: Reasons::All - }] - ); - }); - } - - /// Scenario: - /// A parachain transfers an NFT resident on the relay chain to another parachain account. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn withdraw_and_deposit_nft() { - MockNet::reset(); - - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(1))); - }); - - ParaA::execute_with(|| { - let message = Xcm(vec![TransferAsset { - assets: (GeneralIndex(1), 42u32).into(), - beneficiary: Parachain(2).into(), - }]); - // Send withdraw and deposit - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message)); - }); - - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(2))); - }); - } - - /// Scenario: - /// The relay-chain teleports an NFT to a parachain. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn teleport_nft() { - MockNet::reset(); - - Relay::execute_with(|| { - // Mint the NFT (1, 69) and give it to our "parachain#1 alias". - assert_ok!(relay_chain::Uniques::mint( - relay_chain::RuntimeOrigin::signed(ALICE), - 1, - 69, - child_account_account_id(1, ALICE), - )); - // The parachain#1 alias of Alice is what must hold it on the Relay-chain for it to be - // withdrawable by Alice on the parachain. 
- assert_eq!( - relay_chain::Uniques::owner(1, 69), - Some(child_account_account_id(1, ALICE)) - ); - }); - ParaA::execute_with(|| { - assert_ok!(parachain::ForeignUniques::force_create( - parachain::RuntimeOrigin::root(), - (Parent, GeneralIndex(1)).into(), - ALICE, - false, - )); - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()), - None, - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0); - - // IRL Alice would probably just execute this locally on the Relay-chain, but we can't - // easily do that here since we only send between chains. - let message = Xcm(vec![ - WithdrawAsset((GeneralIndex(1), 69u32).into()), - InitiateTeleport { - assets: AllCounted(1).into(), - dest: Parachain(1).into(), - xcm: Xcm(vec![DepositAsset { - assets: AllCounted(1).into(), - beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), - }]), - }, - ]); - // Send teleport - let alice = AccountId32 { id: ALICE.into(), network: None }; - assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); - }); - ParaA::execute_with(|| { - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()), - Some(ALICE), - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000); - }); - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(1, 69), None); - }); - } - - /// Scenario: - /// The relay-chain transfers an NFT into a parachain's sovereign account, who then mints a - /// trustless-backed-derived locally. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn reserve_asset_transfer_nft() { - sp_tracing::init_for_tests(); - MockNet::reset(); - - Relay::execute_with(|| { - assert_ok!(relay_chain::Uniques::force_create( - relay_chain::RuntimeOrigin::root(), - 2, - ALICE, - false - )); - assert_ok!(relay_chain::Uniques::mint( - relay_chain::RuntimeOrigin::signed(ALICE), - 2, - 69, - child_account_account_id(1, ALICE) - )); - assert_eq!( - relay_chain::Uniques::owner(2, 69), - Some(child_account_account_id(1, ALICE)) - ); - }); - ParaA::execute_with(|| { - assert_ok!(parachain::ForeignUniques::force_create( - parachain::RuntimeOrigin::root(), - (Parent, GeneralIndex(2)).into(), - ALICE, - false, - )); - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), - None, - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0); - - let message = Xcm(vec![ - WithdrawAsset((GeneralIndex(2), 69u32).into()), - DepositReserveAsset { - assets: AllCounted(1).into(), - dest: Parachain(1).into(), - xcm: Xcm(vec![DepositAsset { - assets: AllCounted(1).into(), - beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), - }]), - }, - ]); - // Send transfer - let alice = AccountId32 { id: ALICE.into(), network: None }; - assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); - }); - ParaA::execute_with(|| { - log::debug!(target: "xcm-executor", "Hello"); - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), - Some(ALICE), - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000); - }); - - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_id(1))); - }); - } - - /// Scenario: - /// The relay-chain creates an asset class on a parachain and then Alice transfers her NFT into - /// that parachain's sovereign account, who then mints a trustless-backed-derivative locally. 
- /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn reserve_asset_class_create_and_reserve_transfer() { - MockNet::reset(); - - Relay::execute_with(|| { - assert_ok!(relay_chain::Uniques::force_create( - relay_chain::RuntimeOrigin::root(), - 2, - ALICE, - false - )); - assert_ok!(relay_chain::Uniques::mint( - relay_chain::RuntimeOrigin::signed(ALICE), - 2, - 69, - child_account_account_id(1, ALICE) - )); - assert_eq!( - relay_chain::Uniques::owner(2, 69), - Some(child_account_account_id(1, ALICE)) - ); - - let message = Xcm(vec![Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: Weight::from_parts(1_000_000_000, 1024 * 1024), - call: parachain::RuntimeCall::from( - pallet_uniques::Call::::create { - collection: (Parent, 2u64).into(), - admin: parent_account_id(), - }, - ) - .encode() - .into(), - }]); - // Send creation. - assert_ok!(RelayChainPalletXcm::send_xcm(Here, Parachain(1), message)); - }); - ParaA::execute_with(|| { - // Then transfer - let message = Xcm(vec![ - WithdrawAsset((GeneralIndex(2), 69u32).into()), - DepositReserveAsset { - assets: AllCounted(1).into(), - dest: Parachain(1).into(), - xcm: Xcm(vec![DepositAsset { - assets: AllCounted(1).into(), - beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), - }]), - }, - ]); - let alice = AccountId32 { id: ALICE.into(), network: None }; - assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); - }); - ParaA::execute_with(|| { - assert_eq!(parachain::Balances::reserved_balance(&parent_account_id()), 1000); - assert_eq!( - parachain::ForeignUniques::collection_owner((Parent, 2u64).into()), - Some(parent_account_id()) - ); - }); - } - - /// Scenario: - /// A parachain transfers funds on the relay chain to another parachain account. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn withdraw_and_deposit() { - MockNet::reset(); - - let send_amount = 10; - - ParaA::execute_with(|| { - let message = Xcm(vec![ - WithdrawAsset((Here, send_amount).into()), - buy_execution((Here, send_amount)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() }, - ]); - // Send withdraw and deposit - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); - }); - - Relay::execute_with(|| { - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(1)), - INITIAL_BALANCE - send_amount - ); - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(2)), - INITIAL_BALANCE + send_amount - ); - }); - } - - /// Scenario: - /// A parachain wants to be notified that a transfer worked correctly. - /// It sends a `QueryHolding` after the deposit to get notified on success. - /// - /// Asserts that the balances are updated correctly and the expected XCM is sent. 
- #[test] - fn query_holding() { - MockNet::reset(); - - let send_amount = 10; - let query_id_set = 1234; - - // Send a message which fully succeeds on the relay chain - ParaA::execute_with(|| { - let message = Xcm(vec![ - WithdrawAsset((Here, send_amount).into()), - buy_execution((Here, send_amount)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() }, - ReportHolding { - response_info: QueryResponseInfo { - destination: Parachain(1).into(), - query_id: query_id_set, - max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), - }, - assets: All.into(), - }, - ]); - // Send withdraw and deposit with query holding - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone(),)); - }); - - // Check that transfer was executed - Relay::execute_with(|| { - // Withdraw executed - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(1)), - INITIAL_BALANCE - send_amount - ); - // Deposit executed - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(2)), - INITIAL_BALANCE + send_amount - ); - }); - - // Check that QueryResponse message was received - ParaA::execute_with(|| { - assert_eq!( - parachain::MsgQueue::received_dmp(), - vec![Xcm(vec![QueryResponse { - query_id: query_id_set, - response: Response::Assets(Assets::new()), - max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), - querier: Some(Here.into()), - }])], - ); - }); - } -} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs deleted file mode 100644 index 41e62596392e..000000000000 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Parachain runtime mock. 
- -use codec::{Decode, Encode}; -use core::marker::PhantomData; -use frame_support::{ - construct_runtime, derive_impl, parameter_types, - traits::{ContainsPair, EnsureOrigin, EnsureOriginWithArg, Everything, EverythingBut, Nothing}, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, -}; - -use frame_system::EnsureRoot; -use sp_core::{ConstU32, H256}; -use sp_runtime::{ - traits::{Get, Hash, IdentityLookup}, - AccountId32, -}; -use sp_std::prelude::*; - -use pallet_xcm::XcmPassthrough; -use polkadot_core_primitives::BlockNumber as RelayBlockNumber; -use polkadot_parachain_primitives::primitives::{ - DmpMessageHandler, Id as ParaId, Sibling, XcmpMessageFormat, XcmpMessageHandler, -}; -use xcm::{latest::prelude::*, VersionedXcm}; -use xcm_builder::{ - Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, ConvertedConcreteId, - EnsureDecodableXcm, EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, NoChecking, - NonFungiblesAdapter, ParentIsPreset, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, -}; -use xcm_executor::{ - traits::{ConvertLocation, JustTry}, - Config, XcmExecutor, -}; - -pub type SovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, -); - -pub type AccountId = AccountId32; -pub type Balance = u128; - -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type BlockWeights = (); - type BlockLength = (); - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = Everything; - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; -} - -parameter_types! 
{ - pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; -} - -#[cfg(feature = "runtime-benchmarks")] -pub struct UniquesHelper; -#[cfg(feature = "runtime-benchmarks")] -impl pallet_uniques::BenchmarkHelper for UniquesHelper { - fn collection(i: u16) -> Location { - GeneralIndex(i as u128).into() - } - fn item(i: u16) -> AssetInstance { - AssetInstance::Index(i as u128) - } -} - -impl pallet_uniques::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = Location; - type ItemId = AssetInstance; - type Currency = Balances; - type CreateOrigin = ForeignCreators; - type ForceOrigin = frame_system::EnsureRoot; - type CollectionDeposit = frame_support::traits::ConstU128<1_000>; - type ItemDeposit = frame_support::traits::ConstU128<1_000>; - type MetadataDepositBase = frame_support::traits::ConstU128<1_000>; - type AttributeDepositBase = frame_support::traits::ConstU128<1_000>; - type DepositPerByte = frame_support::traits::ConstU128<1>; - type StringLimit = ConstU32<64>; - type KeyLimit = ConstU32<64>; - type ValueLimit = ConstU32<128>; - type Locker = (); - type WeightInfo = (); - #[cfg(feature = "runtime-benchmarks")] - type Helper = UniquesHelper; -} - -// `EnsureOriginWithArg` impl for `CreateOrigin` which allows only XCM origins -// which are locations containing the class location. -pub struct ForeignCreators; -impl EnsureOriginWithArg for ForeignCreators { - type Success = AccountId; - - fn try_origin( - o: RuntimeOrigin, - a: &Location, - ) -> sp_std::result::Result { - let origin_location = pallet_xcm::EnsureXcm::::try_origin(o.clone())?; - if !a.starts_with(&origin_location) { - return Err(o) - } - SovereignAccountOf::convert_location(&origin_location).ok_or(o) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin(a: &Location) -> Result { - Ok(pallet_xcm::Origin::Xcm(a.clone()).into()) - } -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); - pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); -} - -parameter_types! { - pub const KsmLocation: Location = Location::parent(); - pub const RelayNetwork: NetworkId = NetworkId::Kusama; - pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); -} - -pub type LocationToAccountId = ( - ParentIsPreset, - SiblingParachainConvertsVia, - AccountId32Aliases, - Account32Hash<(), AccountId>, -); - -pub type XcmOriginToCallOrigin = ( - SovereignSignedViaLocation, - SignedAccountId32AsNative, - XcmPassthrough, -); - -parameter_types! 
{ - pub const UnitWeightCost: Weight = Weight::from_parts(1, 1); - pub KsmPerSecondPerByte: (AssetId, u128, u128) = (AssetId(Parent.into()), 1, 1); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - pub ForeignPrefix: Location = (Parent,).into(); -} - -pub type LocalAssetTransactor = ( - FungibleAdapter, LocationToAccountId, AccountId, ()>, - NonFungiblesAdapter< - ForeignUniques, - ConvertedConcreteId, - SovereignAccountOf, - AccountId, - NoChecking, - (), - >, -); - -pub type XcmRouter = EnsureDecodableXcm>; -pub type Barrier = AllowUnpaidExecutionFrom; - -parameter_types! { - pub NftCollectionOne: AssetFilter - = Wild(AllOf { fun: WildNonFungible, id: AssetId((Parent, GeneralIndex(1)).into()) }); - pub NftCollectionOneForRelay: (AssetFilter, Location) - = (NftCollectionOne::get(), (Parent,).into()); -} -pub type TrustedTeleporters = xcm_builder::Case; -pub type TrustedReserves = EverythingBut>; - -pub struct XcmConfig; -impl Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = LocalAssetTransactor; - type OriginConverter = XcmOriginToCallOrigin; - type IsReserve = (NativeAsset, TrustedReserves); - type IsTeleporter = TrustedTeleporters; - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = FixedWeightBounds; - type Trader = FixedRateOfFungible; - type ResponseHandler = (); - type AssetTrap = (); - type AssetLocker = PolkadotXcm; - type AssetExchanger = (); - type AssetClaims = (); - type SubscriptionService = (); - type PalletInstancesInfo = (); - type FeeManager = (); - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; - type TransactionalProcessor = FrameTransactionalProcessor; - type HrmpNewChannelOpenRequestHandler = (); - type HrmpChannelAcceptedHandler = (); - type HrmpChannelClosingHandler = (); -} - -#[frame_support::pallet] -pub mod mock_msg_queue { - use super::*; - use frame_support::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - type XcmExecutor: ExecuteXcm; - } - - #[pallet::call] - impl Pallet {} - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - #[pallet::storage] - #[pallet::getter(fn parachain_id)] - pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; - - #[pallet::storage] - #[pallet::getter(fn received_dmp)] - /// A queue of received DMP messages - pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; - - impl Get for Pallet { - fn get() -> ParaId { - Self::parachain_id() - } - } - - pub type MessageId = [u8; 32]; - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - // XCMP - /// Some XCM was executed OK. - Success(Option), - /// Some XCM failed. - Fail(Option, XcmError), - /// Bad XCM version used. - BadVersion(Option), - /// Bad XCM format used. - BadFormat(Option), - - // DMP - /// Downward message is invalid XCM. - InvalidFormat(MessageId), - /// Downward message is unsupported version of XCM. - UnsupportedVersion(MessageId), - /// Downward message executed with the given outcome. 
- ExecutedDownward(MessageId, Outcome), - } - - impl Pallet { - pub fn set_para_id(para_id: ParaId) { - ParachainId::::put(para_id); - } - - fn handle_xcmp_message( - sender: ParaId, - _sent_at: RelayBlockNumber, - xcm: VersionedXcm, - max_weight: Weight, - ) -> Result { - let hash = Encode::using_encoded(&xcm, T::Hashing::hash); - let mut message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); - let (result, event) = match Xcm::::try_from(xcm) { - Ok(xcm) => { - let location = (Parent, Parachain(sender.into())); - match T::XcmExecutor::prepare_and_execute( - location, - xcm, - &mut message_hash, - max_weight, - Weight::zero(), - ) { - Outcome::Error { error } => (Err(error), Event::Fail(Some(hash), error)), - Outcome::Complete { used } => (Ok(used), Event::Success(Some(hash))), - // As far as the caller is concerned, this was dispatched without error, so - // we just report the weight used. - Outcome::Incomplete { used, error } => - (Ok(used), Event::Fail(Some(hash), error)), - } - }, - Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), - }; - Self::deposit_event(event); - result - } - } - - impl XcmpMessageHandler for Pallet { - fn handle_xcmp_messages<'a, I: Iterator>( - iter: I, - max_weight: Weight, - ) -> Weight { - for (sender, sent_at, data) in iter { - let mut data_ref = data; - let _ = XcmpMessageFormat::decode(&mut data_ref) - .expect("Simulator encodes with versioned xcm format; qed"); - - let mut remaining_fragments = data_ref; - while !remaining_fragments.is_empty() { - if let Ok(xcm) = - VersionedXcm::::decode(&mut remaining_fragments) - { - let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); - } else { - debug_assert!(false, "Invalid incoming XCMP message data"); - } - } - } - max_weight - } - } - - impl DmpMessageHandler for Pallet { - fn handle_dmp_messages( - iter: impl Iterator)>, - limit: Weight, - ) -> Weight { - for (_i, (_sent_at, data)) in iter.enumerate() { - let mut id = sp_io::hashing::blake2_256(&data[..]); - let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); - match maybe_versioned { - Err(_) => { - Self::deposit_event(Event::InvalidFormat(id)); - }, - Ok(versioned) => match Xcm::try_from(versioned) { - Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), - Ok(x) => { - let outcome = T::XcmExecutor::prepare_and_execute( - Parent, - x.clone(), - &mut id, - limit, - Weight::zero(), - ); - >::append(x); - Self::deposit_event(Event::ExecutedDownward(id, outcome)); - }, - }, - } - } - limit - } - } -} - -impl mock_msg_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; -} - -pub type LocalOriginToLocation = SignedToAccountId32; - -pub struct TrustedLockerCase(PhantomData); -impl> ContainsPair for TrustedLockerCase { - fn contains(origin: &Location, asset: &Asset) -> bool { - let (o, a) = T::get(); - a.matches(asset) && &o == origin - } -} - -parameter_types! 
{ - pub RelayTokenForRelay: (Location, AssetFilter) = (Parent.into(), Wild(AllOf { id: AssetId(Parent.into()), fun: WildFungible })); -} - -pub type TrustedLockers = TrustedLockerCase; - -impl pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - type ExecuteXcmOrigin = EnsureXcmOrigin; - type XcmExecuteFilter = Everything; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Nothing; - type XcmReserveTransferFilter = Everything; - type Weigher = FixedWeightBounds; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; - type Currency = Balances; - type CurrencyMatcher = (); - type TrustedLockers = TrustedLockers; - type SovereignAccountOf = LocationToAccountId; - type MaxLockers = ConstU32<8>; - type MaxRemoteLockConsumers = ConstU32<0>; - type RemoteLockConsumerIdentifier = (); - type WeightInfo = pallet_xcm::TestWeightInfo; - type AdminOrigin = EnsureRoot; -} - -type Block = frame_system::mocking::MockBlock; - -construct_runtime!( - pub enum Runtime - { - System: frame_system, - Balances: pallet_balances, - MsgQueue: mock_msg_queue, - PolkadotXcm: pallet_xcm, - ForeignUniques: pallet_uniques, - } -); diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs new file mode 100644 index 000000000000..17cde921f3e2 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs @@ -0,0 +1,185 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub use pallet::*; +use polkadot_core_primitives::BlockNumber as RelayBlockNumber; +use polkadot_parachain_primitives::primitives::{ + DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, +}; +use sp_runtime::traits::{Get, Hash}; +use xcm::{latest::prelude::*, VersionedXcm}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type XcmExecutor: ExecuteXcm; + } + + #[pallet::call] + impl Pallet {} + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::storage] + pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + + #[pallet::storage] + /// A queue of received DMP messages + pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + + impl Get for Pallet { + fn get() -> ParaId { + Self::parachain_id() + } + } + + pub type MessageId = [u8; 32]; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + // XCMP + /// Some XCM was executed OK. 
+ Success(Option), + /// Some XCM failed. + Fail(Option, XcmError), + /// Bad XCM version used. + BadVersion(Option), + /// Bad XCM format used. + BadFormat(Option), + + // DMP + /// Downward message is invalid XCM. + InvalidFormat(MessageId), + /// Downward message is unsupported version of XCM. + UnsupportedVersion(MessageId), + /// Downward message executed with the given outcome. + ExecutedDownward(MessageId, Outcome), + } + + impl Pallet { + /// Get the Parachain Id. + pub fn parachain_id() -> ParaId { + ParachainId::::get() + } + + /// Set the Parachain Id. + pub fn set_para_id(para_id: ParaId) { + ParachainId::::put(para_id); + } + + /// Get the queue of receieved DMP messages. + pub fn received_dmp() -> Vec> { + ReceivedDmp::::get() + } + + fn handle_xcmp_message( + sender: ParaId, + _sent_at: RelayBlockNumber, + xcm: VersionedXcm, + max_weight: Weight, + ) -> Result { + let hash = Encode::using_encoded(&xcm, T::Hashing::hash); + let mut message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); + let (result, event) = match Xcm::::try_from(xcm) { + Ok(xcm) => { + let location = (Parent, Parachain(sender.into())); + match T::XcmExecutor::prepare_and_execute( + location, + xcm, + &mut message_hash, + max_weight, + Weight::zero(), + ) { + Outcome::Error { error } => (Err(error), Event::Fail(Some(hash), error)), + Outcome::Complete { used } => (Ok(used), Event::Success(Some(hash))), + // As far as the caller is concerned, this was dispatched without error, so + // we just report the weight used. + Outcome::Incomplete { used, error } => + (Ok(used), Event::Fail(Some(hash), error)), + } + }, + Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + }; + Self::deposit_event(event); + result + } + } + + impl XcmpMessageHandler for Pallet { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + max_weight: Weight, + ) -> Weight { + for (sender, sent_at, data) in iter { + let mut data_ref = data; + let _ = XcmpMessageFormat::decode(&mut data_ref) + .expect("Simulator encodes with versioned xcm format; qed"); + + let mut remaining_fragments = data_ref; + while !remaining_fragments.is_empty() { + if let Ok(xcm) = + VersionedXcm::::decode(&mut remaining_fragments) + { + let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); + } else { + debug_assert!(false, "Invalid incoming XCMP message data"); + } + } + } + max_weight + } + } + + impl DmpMessageHandler for Pallet { + fn handle_dmp_messages( + iter: impl Iterator)>, + limit: Weight, + ) -> Weight { + for (_i, (_sent_at, data)) in iter.enumerate() { + let mut id = sp_io::hashing::blake2_256(&data[..]); + let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); + match maybe_versioned { + Err(_) => { + Self::deposit_event(Event::InvalidFormat(id)); + }, + Ok(versioned) => match Xcm::try_from(versioned) { + Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Ok(x) => { + let outcome = T::XcmExecutor::prepare_and_execute( + Parent, + x.clone(), + &mut id, + limit, + Weight::zero(), + ); + >::append(x); + Self::deposit_event(Event::ExecutedDownward(id, outcome)); + }, + }, + } + } + limit + } + } +} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs new file mode 100644 index 000000000000..8021f9551658 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs @@ -0,0 +1,182 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +mod mock_msg_queue; +mod xcm_config; +pub use xcm_config::*; + +use core::marker::PhantomData; +use frame_support::{ + construct_runtime, derive_impl, parameter_types, + traits::{ConstU128, ContainsPair, EnsureOrigin, EnsureOriginWithArg, Everything, Nothing}, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, +}; +use frame_system::EnsureRoot; +use sp_core::ConstU32; +use sp_runtime::{ + traits::{Get, IdentityLookup}, + AccountId32, +}; +use sp_std::prelude::*; +use xcm::latest::prelude::*; +use xcm_builder::{EnsureXcmOrigin, SignedToAccountId32}; +use xcm_executor::{traits::ConvertLocation, XcmExecutor}; + +pub type AccountId = AccountId32; +pub type Balance = u128; + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type ExistentialDeposit = ConstU128<1>; + type AccountStore = System; +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct UniquesHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_uniques::BenchmarkHelper for UniquesHelper { + fn collection(i: u16) -> Location { + GeneralIndex(i as u128).into() + } + fn item(i: u16) -> AssetInstance { + AssetInstance::Index(i as u128) + } +} + +impl pallet_uniques::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CollectionId = Location; + type ItemId = AssetInstance; + type Currency = Balances; + type CreateOrigin = ForeignCreators; + type ForceOrigin = frame_system::EnsureRoot; + type CollectionDeposit = frame_support::traits::ConstU128<1_000>; + type ItemDeposit = frame_support::traits::ConstU128<1_000>; + type MetadataDepositBase = frame_support::traits::ConstU128<1_000>; + type AttributeDepositBase = frame_support::traits::ConstU128<1_000>; + type DepositPerByte = frame_support::traits::ConstU128<1>; + type StringLimit = ConstU32<64>; + type KeyLimit = ConstU32<64>; + type ValueLimit = ConstU32<128>; + type Locker = (); + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type Helper = UniquesHelper; +} + +// `EnsureOriginWithArg` impl for `CreateOrigin` which allows only XCM origins +// which are locations containing the class location. 
+pub struct ForeignCreators; +impl EnsureOriginWithArg for ForeignCreators { + type Success = AccountId; + + fn try_origin( + o: RuntimeOrigin, + a: &Location, + ) -> sp_std::result::Result { + let origin_location = pallet_xcm::EnsureXcm::::try_origin(o.clone())?; + if !a.starts_with(&origin_location) { + return Err(o); + } + xcm_config::location_converter::LocationConverter::convert_location(&origin_location) + .ok_or(o) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(a: &Location) -> Result { + Ok(pallet_xcm::Origin::Xcm(a.clone()).into()) + } +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); + pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); +} + +impl mock_msg_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub type LocalOriginToLocation = + SignedToAccountId32; + +pub struct TrustedLockerCase(PhantomData); +impl> ContainsPair for TrustedLockerCase { + fn contains(origin: &Location, asset: &Asset) -> bool { + let (o, a) = T::get(); + a.matches(asset) && &o == origin + } +} + +parameter_types! { + pub RelayTokenForRelay: (Location, AssetFilter) = (Parent.into(), Wild(AllOf { id: AssetId(Parent.into()), fun: WildFungible })); +} + +pub type TrustedLockers = TrustedLockerCase; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type XcmReserveTransferFilter = Everything; + type Weigher = weigher::Weigher; + type UniversalLocation = constants::UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = TrustedLockers; + type SovereignAccountOf = location_converter::LocationConverter; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub struct Runtime { + System: frame_system, + Balances: pallet_balances, + MsgQueue: mock_msg_queue, + PolkadotXcm: pallet_xcm, + ForeignUniques: pallet_uniques, + } +); diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs new file mode 100644 index 000000000000..25cffcf8cef2 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs @@ -0,0 +1,39 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::{ + constants::KsmLocation, location_converter::LocationConverter, AccountId, Balances, + ForeignUniques, +}; +use xcm::latest::prelude::*; +use xcm_builder::{ + ConvertedConcreteId, FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, +}; +use xcm_executor::traits::JustTry; + +type LocalAssetTransactor = ( + FungibleAdapter, LocationConverter, AccountId, ()>, + NonFungiblesAdapter< + ForeignUniques, + ConvertedConcreteId, + LocationConverter, + AccountId, + NoChecking, + (), + >, +); + +pub type AssetTransactor = LocalAssetTransactor; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs new file mode 100644 index 000000000000..1c7aa2c6d321 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs @@ -0,0 +1,20 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use frame_support::traits::Everything; +use xcm_builder::AllowUnpaidExecutionFrom; + +pub type Barrier = AllowUnpaidExecutionFrom; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs new file mode 100644 index 000000000000..f6d0174def8f --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::MsgQueue; +use frame_support::parameter_types; +use xcm::latest::prelude::*; + +parameter_types! { + pub KsmPerSecondPerByte: (AssetId, u128, u128) = (AssetId(Parent.into()), 1, 1); + pub const MaxAssetsIntoHolding: u32 = 64; +} + +parameter_types! 
{ + pub const KsmLocation: Location = Location::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Kusama; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); +} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs new file mode 100644 index 000000000000..5a54414dd13f --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs @@ -0,0 +1,25 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::{constants::RelayNetwork, AccountId}; +use xcm_builder::{AccountId32Aliases, DescribeAllTerminal, DescribeFamily, HashedDescription}; + +type LocationToAccountId = ( + HashedDescription>, + AccountId32Aliases, +); + +pub type LocationConverter = LocationToAccountId; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs new file mode 100644 index 000000000000..0ba02aab9bf9 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs @@ -0,0 +1,63 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +pub mod asset_transactor; +pub mod barrier; +pub mod constants; +pub mod location_converter; +pub mod origin_converter; +pub mod reserve; +pub mod teleporter; +pub mod weigher; + +use crate::parachain::{MsgQueue, PolkadotXcm, RuntimeCall}; +use frame_support::traits::{Everything, Nothing}; +use xcm_builder::{EnsureDecodableXcm, FixedRateOfFungible, FrameTransactionalProcessor}; + +// Generated from `decl_test_network!` +pub type XcmRouter = EnsureDecodableXcm>; + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = asset_transactor::AssetTransactor; + type OriginConverter = origin_converter::OriginConverter; + type IsReserve = reserve::TrustedReserves; + type IsTeleporter = teleporter::TrustedTeleporters; + type UniversalLocation = constants::UniversalLocation; + type Barrier = barrier::Barrier; + type Weigher = weigher::Weigher; + type Trader = FixedRateOfFungible; + type ResponseHandler = (); + type AssetTrap = (); + type AssetLocker = PolkadotXcm; + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type FeeManager = (); + type MaxAssetsIntoHolding = constants::MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); +} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs new file mode 100644 index 000000000000..5a60f0e60014 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::{ + constants::RelayNetwork, location_converter::LocationConverter, RuntimeOrigin, +}; +use pallet_xcm::XcmPassthrough; +use xcm_builder::{SignedAccountId32AsNative, SovereignSignedViaLocation}; + +type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + SignedAccountId32AsNative, + XcmPassthrough, +); + +pub type OriginConverter = XcmOriginToCallOrigin; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs new file mode 100644 index 000000000000..8763a2f37ccd --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs @@ -0,0 +1,21 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::teleporter::TrustedTeleporters; +use frame_support::traits::EverythingBut; +use xcm_builder::NativeAsset; + +pub type TrustedReserves = (NativeAsset, EverythingBut); diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs new file mode 100644 index 000000000000..41cb7a5eb2de --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs @@ -0,0 +1,27 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use frame_support::parameter_types; +use xcm::latest::prelude::*; + +parameter_types! { + pub NftCollectionOne: AssetFilter + = Wild(AllOf { fun: WildNonFungible, id: AssetId((Parent, GeneralIndex(1)).into()) }); + pub NftCollectionOneForRelay: (AssetFilter, Location) + = (NftCollectionOne::get(), (Parent,).into()); +} + +pub type TrustedTeleporters = xcm_builder::Case; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs new file mode 100644 index 000000000000..4bdc98ea3b0e --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs @@ -0,0 +1,27 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::RuntimeCall; +use frame_support::parameter_types; +use xcm::latest::prelude::*; +use xcm_builder::FixedWeightBounds; + +parameter_types! 
{ + pub const UnitWeightCost: Weight = Weight::from_parts(1, 1); + pub const MaxInstructions: u32 = 100; +} + +pub type Weigher = FixedWeightBounds; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/mod.rs similarity index 53% rename from polkadot/xcm/xcm-simulator/example/src/relay_chain.rs rename to polkadot/xcm/xcm-simulator/example/src/relay_chain/mod.rs index b41df3cfa2b0..f698eba41d44 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/mod.rs @@ -16,31 +16,29 @@ //! Relay chain runtime mock. +mod xcm_config; +pub use xcm_config::*; + use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{AsEnsureOriginWithArg, Everything, Nothing, ProcessMessage, ProcessMessageError}, + traits::{ + AsEnsureOriginWithArg, ConstU128, Everything, Nothing, ProcessMessage, ProcessMessageError, + }, weights::{Weight, WeightMeter}, }; use frame_system::EnsureRoot; -use sp_core::{ConstU32, H256}; +use sp_core::ConstU32; use sp_runtime::{traits::IdentityLookup, AccountId32}; -use polkadot_parachain_primitives::primitives::Id as ParaId; use polkadot_runtime_parachains::{ configuration, inclusion::{AggregateMessageOrigin, UmpQueueId}, origin, shared, }; use xcm::latest::prelude::*; -use xcm_builder::{ - Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, AsPrefixedGeneralIndex, - ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - ConvertedConcreteId, EnsureDecodableXcm, FixedRateOfFungible, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, -}; -use xcm_executor::{traits::JustTry, Config, XcmExecutor}; +use xcm_builder::{IsConcrete, SignedToAccountId32}; +use xcm_executor::XcmExecutor; pub type AccountId = AccountId32; pub type Balance = u128; @@ -51,51 +49,17 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type BlockWeights = (); - type BlockLength = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = Everything; - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; -} - -parameter_types! 
{ - pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; + type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl pallet_uniques::Config for Runtime { @@ -127,83 +91,8 @@ impl configuration::Config for Runtime { type WeightInfo = configuration::TestWeightInfo; } -parameter_types! { - pub const TokenLocation: Location = Here.into_location(); - pub RelayNetwork: NetworkId = ByGenesis([0; 32]); - pub const AnyNetwork: Option = None; - pub UniversalLocation: InteriorLocation = RelayNetwork::get().into(); - pub UnitWeightCost: u64 = 1_000; -} - -pub type LocationToAccountId = ( - ChildParachainConvertsVia, - AccountId32Aliases, - Account32Hash<(), AccountId>, -); - -pub type LocalAssetTransactor = ( - FungibleAdapter, LocationToAccountId, AccountId, ()>, - NonFungiblesAdapter< - Uniques, - ConvertedConcreteId, JustTry>, - LocationToAccountId, - AccountId, - NoChecking, - (), - >, -); - -type LocalOriginConverter = ( - SovereignSignedViaLocation, - ChildParachainAsNative, - SignedAccountId32AsNative, - ChildSystemParachainAsSuperuser, -); - -parameter_types! { - pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); - pub TokensPerSecondPerByte: (AssetId, u128, u128) = - (AssetId(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; -} - -pub type XcmRouter = EnsureDecodableXcm; -pub type Barrier = AllowUnpaidExecutionFrom; - -pub struct XcmConfig; -impl Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = LocalAssetTransactor; - type OriginConverter = LocalOriginConverter; - type IsReserve = (); - type IsTeleporter = (); - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = FixedWeightBounds; - type Trader = FixedRateOfFungible; - type ResponseHandler = (); - type AssetTrap = (); - type AssetLocker = XcmPallet; - type AssetExchanger = (); - type AssetClaims = (); - type SubscriptionService = (); - type PalletInstancesInfo = (); - type FeeManager = (); - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; - type TransactionalProcessor = FrameTransactionalProcessor; - type HrmpNewChannelOpenRequestHandler = (); - type HrmpChannelAcceptedHandler = (); - type HrmpChannelClosingHandler = (); -} - -pub type LocalOriginToLocation = SignedToAccountId32; +pub type LocalOriginToLocation = + SignedToAccountId32; impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -215,16 +104,16 @@ impl pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; type XcmReserveTransferFilter = Everything; - type Weigher = FixedWeightBounds; - type UniversalLocation = UniversalLocation; + type Weigher = weigher::Weigher; + 
type UniversalLocation = constants::UniversalLocation; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type Currency = Balances; - type CurrencyMatcher = IsConcrete; + type CurrencyMatcher = IsConcrete; type TrustedLockers = (); - type SovereignAccountOf = LocationToAccountId; + type SovereignAccountOf = location_converter::LocationConverter; type MaxLockers = ConstU32<8>; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs new file mode 100644 index 000000000000..c212569d4811 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs @@ -0,0 +1,38 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::relay_chain::{ + constants::TokenLocation, location_converter::LocationConverter, AccountId, Balances, Uniques, +}; +use xcm_builder::{ + AsPrefixedGeneralIndex, ConvertedConcreteId, FungibleAdapter, IsConcrete, NoChecking, + NonFungiblesAdapter, +}; +use xcm_executor::traits::JustTry; + +type LocalAssetTransactor = ( + FungibleAdapter, LocationConverter, AccountId, ()>, + NonFungiblesAdapter< + Uniques, + ConvertedConcreteId, JustTry>, + LocationConverter, + AccountId, + NoChecking, + (), + >, +); + +pub type AssetTransactor = LocalAssetTransactor; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs new file mode 100644 index 000000000000..1c7aa2c6d321 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs @@ -0,0 +1,20 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use frame_support::traits::Everything; +use xcm_builder::AllowUnpaidExecutionFrom; + +pub type Barrier = AllowUnpaidExecutionFrom; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs new file mode 100644 index 000000000000..f590c42990da --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs @@ -0,0 +1,31 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use frame_support::parameter_types; +use xcm::latest::prelude::*; + +parameter_types! { + pub TokensPerSecondPerByte: (AssetId, u128, u128) = + (AssetId(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); + pub const MaxAssetsIntoHolding: u32 = 64; +} + +parameter_types! { + pub const TokenLocation: Location = Here.into_location(); + pub RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorLocation = RelayNetwork::get().into(); + pub UnitWeightCost: u64 = 1_000; +} diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs new file mode 100644 index 000000000000..0f5f4e43dc97 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs @@ -0,0 +1,25 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::relay_chain::{constants::RelayNetwork, AccountId}; +use xcm_builder::{AccountId32Aliases, DescribeAllTerminal, DescribeFamily, HashedDescription}; + +type LocationToAccountId = ( + HashedDescription>, + AccountId32Aliases, +); + +pub type LocationConverter = LocationToAccountId; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs new file mode 100644 index 000000000000..a7a8bae51567 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs @@ -0,0 +1,62 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub mod asset_transactor; +pub mod barrier; +pub mod constants; +pub mod location_converter; +pub mod origin_converter; +pub mod weigher; + +use crate::relay_chain::{RuntimeCall, XcmPallet}; +use frame_support::traits::{Everything, Nothing}; +use xcm_builder::{EnsureDecodableXcm, FixedRateOfFungible, FrameTransactionalProcessor}; +use xcm_executor::Config; + +// Generated from `decl_test_network!` +pub type XcmRouter = EnsureDecodableXcm; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = asset_transactor::AssetTransactor; + type OriginConverter = origin_converter::OriginConverter; + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = constants::UniversalLocation; + type Barrier = barrier::Barrier; + type Weigher = weigher::Weigher; + type Trader = FixedRateOfFungible; + type ResponseHandler = (); + type AssetTrap = (); + type AssetLocker = XcmPallet; + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type FeeManager = (); + type MaxAssetsIntoHolding = constants::MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); +} diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs new file mode 100644 index 000000000000..3c79912a9262 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs @@ -0,0 +1,34 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use crate::relay_chain::{ + constants::RelayNetwork, location_converter::LocationConverter, RuntimeOrigin, +}; +use polkadot_parachain_primitives::primitives::Id as ParaId; +use polkadot_runtime_parachains::origin; +use xcm_builder::{ + ChildParachainAsNative, ChildSystemParachainAsSuperuser, SignedAccountId32AsNative, + SovereignSignedViaLocation, +}; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +pub type OriginConverter = LocalOriginConverter; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs new file mode 100644 index 000000000000..5c02565f4600 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs @@ -0,0 +1,27 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::relay_chain::RuntimeCall; +use frame_support::parameter_types; +use xcm::latest::prelude::*; +use xcm_builder::FixedWeightBounds; + +parameter_types! { + pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); + pub const MaxInstructions: u32 = 100; +} + +pub type Weigher = FixedWeightBounds; diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs new file mode 100644 index 000000000000..6486a849af36 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -0,0 +1,513 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use crate::*; + +use codec::Encode; +use frame_support::{assert_ok, weights::Weight}; +use xcm::latest::QueryResponseInfo; +use xcm_simulator::TestExt; + +// Helper function for forming buy execution message +fn buy_execution(fees: impl Into) -> Instruction { + BuyExecution { fees: fees.into(), weight_limit: Unlimited } +} + +#[test] +fn remote_account_ids_work() { + child_account_account_id(1, ALICE); + sibling_account_account_id(1, ALICE); + parent_account_account_id(ALICE); +} + +#[test] +fn dmp() { + MockNet::reset(); + + let remark = parachain::RuntimeCall::System( + frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, + ); + Relay::execute_with(|| { + assert_ok!(RelayChainPalletXcm::send_xcm( + Here, + Parachain(1), + Xcm(vec![Transact { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), + call: remark.encode().into(), + }]), + )); + }); + + ParaA::execute_with(|| { + use parachain::{RuntimeEvent, System}; + assert!(System::events().iter().any(|r| matches!( + r.event, + RuntimeEvent::System(frame_system::Event::Remarked { .. }) + ))); + }); +} + +#[test] +fn ump() { + MockNet::reset(); + + let remark = relay_chain::RuntimeCall::System( + frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, + ); + ParaA::execute_with(|| { + assert_ok!(ParachainPalletXcm::send_xcm( + Here, + Parent, + Xcm(vec![Transact { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), + call: remark.encode().into(), + }]), + )); + }); + + Relay::execute_with(|| { + use relay_chain::{RuntimeEvent, System}; + assert!(System::events().iter().any(|r| matches!( + r.event, + RuntimeEvent::System(frame_system::Event::Remarked { .. }) + ))); + }); +} + +#[test] +fn xcmp() { + MockNet::reset(); + + let remark = parachain::RuntimeCall::System( + frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, + ); + ParaA::execute_with(|| { + assert_ok!(ParachainPalletXcm::send_xcm( + Here, + (Parent, Parachain(2)), + Xcm(vec![Transact { + origin_kind: OriginKind::SovereignAccount, + require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), + call: remark.encode().into(), + }]), + )); + }); + + ParaB::execute_with(|| { + use parachain::{RuntimeEvent, System}; + assert!(System::events().iter().any(|r| matches!( + r.event, + RuntimeEvent::System(frame_system::Event::Remarked { .. 
}) + ))); + }); +} + +#[test] +fn reserve_transfer() { + MockNet::reset(); + + let withdraw_amount = 123; + + Relay::execute_with(|| { + assert_ok!(RelayChainPalletXcm::limited_reserve_transfer_assets( + relay_chain::RuntimeOrigin::signed(ALICE), + Box::new(Parachain(1).into()), + Box::new(AccountId32 { network: None, id: ALICE.into() }.into()), + Box::new((Here, withdraw_amount).into()), + 0, + Unlimited, + )); + assert_eq!( + relay_chain::Balances::free_balance(&child_account_id(1)), + INITIAL_BALANCE + withdraw_amount + ); + }); + + ParaA::execute_with(|| { + // free execution, full amount received + assert_eq!( + pallet_balances::Pallet::::free_balance(&ALICE), + INITIAL_BALANCE + withdraw_amount + ); + }); +} + +#[test] +fn remote_locking_and_unlocking() { + MockNet::reset(); + + let locked_amount = 100; + + ParaB::execute_with(|| { + let message = Xcm(vec![LockAsset { + asset: (Here, locked_amount).into(), + unlocker: Parachain(1).into(), + }]); + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); + }); + + Relay::execute_with(|| { + use pallet_balances::{BalanceLock, Reasons}; + assert_eq!( + relay_chain::Balances::locks(&child_account_id(2)), + vec![BalanceLock { id: *b"py/xcmlk", amount: locked_amount, reasons: Reasons::All }] + ); + }); + + ParaA::execute_with(|| { + assert_eq!( + parachain::MsgQueue::received_dmp(), + vec![Xcm(vec![NoteUnlockable { + owner: (Parent, Parachain(2)).into(), + asset: (Parent, locked_amount).into() + }])] + ); + }); + + ParaB::execute_with(|| { + // Request unlocking part of the funds on the relay chain + let message = Xcm(vec![RequestUnlock { + asset: (Parent, locked_amount - 50).into(), + locker: Parent.into(), + }]); + assert_ok!(ParachainPalletXcm::send_xcm(Here, (Parent, Parachain(1)), message)); + }); + + Relay::execute_with(|| { + use pallet_balances::{BalanceLock, Reasons}; + // Lock is reduced + assert_eq!( + relay_chain::Balances::locks(&child_account_id(2)), + vec![BalanceLock { + id: *b"py/xcmlk", + amount: locked_amount - 50, + reasons: Reasons::All + }] + ); + }); +} + +/// Scenario: +/// A parachain transfers an NFT resident on the relay chain to another parachain account. +/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn withdraw_and_deposit_nft() { + MockNet::reset(); + + Relay::execute_with(|| { + assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(1))); + }); + + ParaA::execute_with(|| { + let message = Xcm(vec![TransferAsset { + assets: (GeneralIndex(1), 42u32).into(), + beneficiary: Parachain(2).into(), + }]); + // Send withdraw and deposit + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message)); + }); + + Relay::execute_with(|| { + assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(2))); + }); +} + +/// Scenario: +/// The relay-chain teleports an NFT to a parachain. +/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn teleport_nft() { + MockNet::reset(); + + Relay::execute_with(|| { + // Mint the NFT (1, 69) and give it to our "parachain#1 alias". + assert_ok!(relay_chain::Uniques::mint( + relay_chain::RuntimeOrigin::signed(ALICE), + 1, + 69, + child_account_account_id(1, ALICE), + )); + // The parachain#1 alias of Alice is what must hold it on the Relay-chain for it to be + // withdrawable by Alice on the parachain. 
+ assert_eq!(relay_chain::Uniques::owner(1, 69), Some(child_account_account_id(1, ALICE))); + }); + ParaA::execute_with(|| { + assert_ok!(parachain::ForeignUniques::force_create( + parachain::RuntimeOrigin::root(), + (Parent, GeneralIndex(1)).into(), + ALICE, + false, + )); + assert_eq!( + parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()), + None, + ); + assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0); + + // IRL Alice would probably just execute this locally on the Relay-chain, but we can't + // easily do that here since we only send between chains. + let message = Xcm(vec![ + WithdrawAsset((GeneralIndex(1), 69u32).into()), + InitiateTeleport { + assets: AllCounted(1).into(), + dest: Parachain(1).into(), + xcm: Xcm(vec![DepositAsset { + assets: AllCounted(1).into(), + beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), + }]), + }, + ]); + // Send teleport + let alice = AccountId32 { id: ALICE.into(), network: None }; + assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); + }); + ParaA::execute_with(|| { + assert_eq!( + parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()), + Some(ALICE), + ); + assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000); + }); + Relay::execute_with(|| { + assert_eq!(relay_chain::Uniques::owner(1, 69), None); + }); +} + +/// Scenario: +/// The relay-chain transfers an NFT into a parachain's sovereign account, who then mints a +/// trustless-backed-derived locally. +/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn reserve_asset_transfer_nft() { + sp_tracing::init_for_tests(); + MockNet::reset(); + + Relay::execute_with(|| { + assert_ok!(relay_chain::Uniques::force_create( + relay_chain::RuntimeOrigin::root(), + 2, + ALICE, + false + )); + assert_ok!(relay_chain::Uniques::mint( + relay_chain::RuntimeOrigin::signed(ALICE), + 2, + 69, + child_account_account_id(1, ALICE) + )); + assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_account_id(1, ALICE))); + }); + ParaA::execute_with(|| { + assert_ok!(parachain::ForeignUniques::force_create( + parachain::RuntimeOrigin::root(), + (Parent, GeneralIndex(2)).into(), + ALICE, + false, + )); + assert_eq!( + parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), + None, + ); + assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0); + + let message = Xcm(vec![ + WithdrawAsset((GeneralIndex(2), 69u32).into()), + DepositReserveAsset { + assets: AllCounted(1).into(), + dest: Parachain(1).into(), + xcm: Xcm(vec![DepositAsset { + assets: AllCounted(1).into(), + beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), + }]), + }, + ]); + // Send transfer + let alice = AccountId32 { id: ALICE.into(), network: None }; + assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); + }); + ParaA::execute_with(|| { + log::debug!(target: "xcm-executor", "Hello"); + assert_eq!( + parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), + Some(ALICE), + ); + assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000); + }); + + Relay::execute_with(|| { + assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_id(1))); + }); +} + +/// Scenario: +/// The relay-chain creates an asset class on a parachain and then Alice transfers her NFT into +/// that parachain's sovereign account, who then mints a trustless-backed-derivative locally. 
+/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn reserve_asset_class_create_and_reserve_transfer() { + MockNet::reset(); + + Relay::execute_with(|| { + assert_ok!(relay_chain::Uniques::force_create( + relay_chain::RuntimeOrigin::root(), + 2, + ALICE, + false + )); + assert_ok!(relay_chain::Uniques::mint( + relay_chain::RuntimeOrigin::signed(ALICE), + 2, + 69, + child_account_account_id(1, ALICE) + )); + assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_account_id(1, ALICE))); + + let message = Xcm(vec![Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: Weight::from_parts(1_000_000_000, 1024 * 1024), + call: parachain::RuntimeCall::from( + pallet_uniques::Call::::create { + collection: (Parent, 2u64).into(), + admin: parent_account_id(), + }, + ) + .encode() + .into(), + }]); + // Send creation. + assert_ok!(RelayChainPalletXcm::send_xcm(Here, Parachain(1), message)); + }); + ParaA::execute_with(|| { + // Then transfer + let message = Xcm(vec![ + WithdrawAsset((GeneralIndex(2), 69u32).into()), + DepositReserveAsset { + assets: AllCounted(1).into(), + dest: Parachain(1).into(), + xcm: Xcm(vec![DepositAsset { + assets: AllCounted(1).into(), + beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), + }]), + }, + ]); + let alice = AccountId32 { id: ALICE.into(), network: None }; + assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); + }); + ParaA::execute_with(|| { + assert_eq!(parachain::Balances::reserved_balance(&parent_account_id()), 1000); + assert_eq!( + parachain::ForeignUniques::collection_owner((Parent, 2u64).into()), + Some(parent_account_id()) + ); + }); +} + +/// Scenario: +/// A parachain transfers funds on the relay chain to another parachain account. +/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn withdraw_and_deposit() { + MockNet::reset(); + + let send_amount = 10; + + ParaA::execute_with(|| { + let message = Xcm(vec![ + WithdrawAsset((Here, send_amount).into()), + buy_execution((Here, send_amount)), + DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() }, + ]); + // Send withdraw and deposit + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); + }); + + Relay::execute_with(|| { + assert_eq!( + relay_chain::Balances::free_balance(child_account_id(1)), + INITIAL_BALANCE - send_amount + ); + assert_eq!( + relay_chain::Balances::free_balance(child_account_id(2)), + INITIAL_BALANCE + send_amount + ); + }); +} + +/// Scenario: +/// A parachain wants to be notified that a transfer worked correctly. +/// It sends a `QueryHolding` after the deposit to get notified on success. +/// +/// Asserts that the balances are updated correctly and the expected XCM is sent. 
+#[test] +fn query_holding() { + MockNet::reset(); + + let send_amount = 10; + let query_id_set = 1234; + + // Send a message which fully succeeds on the relay chain + ParaA::execute_with(|| { + let message = Xcm(vec![ + WithdrawAsset((Here, send_amount).into()), + buy_execution((Here, send_amount)), + DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() }, + ReportHolding { + response_info: QueryResponseInfo { + destination: Parachain(1).into(), + query_id: query_id_set, + max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), + }, + assets: All.into(), + }, + ]); + // Send withdraw and deposit with query holding + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone(),)); + }); + + // Check that transfer was executed + Relay::execute_with(|| { + // Withdraw executed + assert_eq!( + relay_chain::Balances::free_balance(child_account_id(1)), + INITIAL_BALANCE - send_amount + ); + // Deposit executed + assert_eq!( + relay_chain::Balances::free_balance(child_account_id(2)), + INITIAL_BALANCE + send_amount + ); + }); + + // Check that QueryResponse message was received + ParaA::execute_with(|| { + assert_eq!( + parachain::MsgQueue::received_dmp(), + vec![Xcm(vec![QueryResponse { + query_id: query_id_set, + response: Response::Assets(Assets::new()), + max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), + querier: Some(Here.into()), + }])], + ); + }); +} diff --git a/prdoc/pr_4220.prdoc b/prdoc/pr_4220.prdoc new file mode 100644 index 000000000000..d5688ab325cd --- /dev/null +++ b/prdoc/pr_4220.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Refactor XCM Simulator Example + +doc: + - audience: Runtime Dev + description: | + This PR refactors the XCM Simulator Example to improve developer experience when trying to read and understand the example. 3 monolithic files have been broken down into their respective components across various modules. No major logical changes were made. + +crates: [ ] From 1fb058b791276f1ced9c1980e738e968d5dafb45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Andr=C3=A9s=20Dorado=20Su=C3=A1rez?= Date: Tue, 30 Apr 2024 00:06:17 -0500 Subject: [PATCH 24/27] Assets Events for `Deposited` and `Withdrawn` (#4312) Closes #4308 Polkadot address: 12gMhxHw8QjEwLQvnqsmMVY1z5gFa54vND74aMUbhhwN6mJR --------- Co-authored-by: command-bot <> Co-authored-by: Francisco Aguirre --- prdoc/pr_4312.prdoc | 19 +++++++++++++ substrate/frame/assets/src/impl_fungibles.rs | 16 +++++++++++ substrate/frame/assets/src/lib.rs | 4 +++ substrate/frame/assets/src/tests/sets.rs | 12 ++++++++ .../asset-conversion-tx-payment/src/tests.rs | 22 +++++++++++++++ .../asset-tx-payment/src/tests.rs | 28 +++++++++++++++++++ 6 files changed, 101 insertions(+) create mode 100644 prdoc/pr_4312.prdoc diff --git a/prdoc/pr_4312.prdoc b/prdoc/pr_4312.prdoc new file mode 100644 index 000000000000..d773edbd14de --- /dev/null +++ b/prdoc/pr_4312.prdoc @@ -0,0 +1,19 @@ +title: Add `Deposited`/`Withdrawn` events for `pallet-assets` + +doc: + - audience: Runtime Dev + description: | + New events were added to `pallet-assets`: `Deposited` and `Withdrawn`. Make sure + to cover those events on tests if necessary. + - audience: Runtime User + description: | + New events were added to `pallet-assets`: `Deposited` and `Withdrawn`. These indicate + a change in the balance of an account. 
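For illustration (not part of the diff itself): a runtime test can cover the new events with assertions along these lines. The event variants and field names come from the `pallet-assets` changes in this PR, while the `asset_id`, `who` and `amount` values below are placeholders, and `System`/`RuntimeEvent` are assumed to be the usual mock-runtime aliases used in the tests further down in this patch.

```rust
// Hypothetical assertions in a mock-runtime test; the values are made up.
System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited {
    asset_id: 1,
    who: account,
    amount: 50,
}));
System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn {
    asset_id: 1,
    who: account,
    amount: 50,
}));
```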
+ +crates: + - name: pallet-assets + bump: minor + - name: pallet-asset-tx-payment + bump: minor + - name: pallet-asset-conversion-tx-payment + bump: minor diff --git a/substrate/frame/assets/src/impl_fungibles.rs b/substrate/frame/assets/src/impl_fungibles.rs index 9f837a604341..30122f6d788f 100644 --- a/substrate/frame/assets/src/impl_fungibles.rs +++ b/substrate/frame/assets/src/impl_fungibles.rs @@ -118,6 +118,22 @@ impl, I: 'static> fungibles::Balanced<::AccountI { type OnDropCredit = fungibles::DecreaseIssuance; type OnDropDebt = fungibles::IncreaseIssuance; + + fn done_deposit( + asset_id: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) { + Self::deposit_event(Event::Deposited { asset_id, who: who.clone(), amount }) + } + + fn done_withdraw( + asset_id: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) { + Self::deposit_event(Event::Withdrawn { asset_id, who: who.clone(), amount }) + } } impl, I: 'static> fungibles::Unbalanced for Pallet { diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 9056b1eefbdc..d52149225558 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -571,6 +571,10 @@ pub mod pallet { Touched { asset_id: T::AssetId, who: T::AccountId, depositor: T::AccountId }, /// Some account `who` was blocked. Blocked { asset_id: T::AssetId, who: T::AccountId }, + /// Some assets were deposited (e.g. for transaction fees). + Deposited { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance }, + /// Some assets were withdrawn from the account (e.g. for transaction fees). + Withdrawn { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance }, } #[pallet::error] diff --git a/substrate/frame/assets/src/tests/sets.rs b/substrate/frame/assets/src/tests/sets.rs index f85a736c0832..4d75b8aeab2c 100644 --- a/substrate/frame/assets/src/tests/sets.rs +++ b/substrate/frame/assets/src/tests/sets.rs @@ -90,6 +90,12 @@ fn deposit_from_set_types_works() { assert_eq!(First::::balance((), &account2), 50); assert_eq!(First::::total_issuance(()), 100); + System::assert_has_event(RuntimeEvent::Assets(crate::Event::Deposited { + asset_id: asset1, + who: account2, + amount: 50, + })); + assert_eq!(imb.peek(), 50); let (imb1, imb2) = imb.split(30); @@ -336,6 +342,12 @@ fn withdraw_from_set_types_works() { assert_eq!(First::::balance((), &account2), 50); assert_eq!(First::::total_issuance(()), 200); + System::assert_has_event(RuntimeEvent::Assets(crate::Event::Withdrawn { + asset_id: asset1, + who: account2, + amount: 50, + })); + assert_eq!(imb.peek(), 50); drop(imb); assert_eq!(First::::total_issuance(()), 150); diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index 62faed269d37..aa2f26f3a6a8 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -201,6 +201,8 @@ fn transaction_payment_in_asset_possible() { .base_weight(Weight::from_parts(base_weight, 0)) .build() .execute_with(|| { + System::set_block_number(1); + // create the asset let asset_id = 1; let min_balance = 2; @@ -246,6 +248,12 @@ fn transaction_payment_in_asset_possible() { // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { + asset_id, + 
who: caller, + amount: fee_in_asset, + })); + assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), &info_from_weight(WEIGHT_5), // estimated tx weight @@ -385,6 +393,8 @@ fn asset_transaction_payment_with_tip_and_refund() { .base_weight(Weight::from_parts(base_weight, 0)) .build() .execute_with(|| { + System::set_block_number(1); + // create the asset let asset_id = 1; let min_balance = 2; @@ -434,6 +444,12 @@ fn asset_transaction_payment_with_tip_and_refund() { ) .unwrap(); + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { + asset_id, + who: caller, + amount: fee_in_asset, + })); + assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), &info_from_weight(WEIGHT_100), @@ -451,6 +467,12 @@ fn asset_transaction_payment_with_tip_and_refund() { balance - fee_in_asset + expected_token_refund ); assert_eq!(Balances::free_balance(caller), 20 * balance_factor); + + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited { + asset_id, + who: caller, + amount: expected_token_refund, + })); }); } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 8df98ceda997..098ecf11dd92 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -157,6 +157,8 @@ fn transaction_payment_in_asset_possible() { .base_weight(Weight::from_parts(base_weight, 0)) .build() .execute_with(|| { + System::set_block_number(1); + // create the asset let asset_id = 1; let min_balance = 2; @@ -188,6 +190,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { + asset_id, + who: caller, + amount: fee, + })); + assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), &info_from_weight(Weight::from_parts(weight, 0)), @@ -198,6 +206,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, caller), balance - fee); // check that the block author gets rewarded assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), fee); + + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited { + asset_id, + who: BLOCK_AUTHOR, + amount: fee, + })); }); } @@ -263,6 +277,8 @@ fn asset_transaction_payment_with_tip_and_refund() { .base_weight(Weight::from_parts(base_weight, 0)) .build() .execute_with(|| { + System::set_block_number(1); + // create the asset let asset_id = 1; let min_balance = 2; @@ -292,6 +308,12 @@ fn asset_transaction_payment_with_tip_and_refund() { .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { + asset_id, + who: caller, + amount: fee_with_tip, + })); + let final_weight = 50; assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), @@ -304,6 +326,12 @@ fn asset_transaction_payment_with_tip_and_refund() { fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get(); assert_eq!(Assets::balance(asset_id, caller), balance - (final_fee)); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), final_fee); + + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited { + asset_id, + who: caller, + amount: fee_with_tip - final_fee, + })); }); } From 
31dc8bb1de9a73c57863c4698ea23559ef729f67 Mon Sep 17 00:00:00 2001 From: gupnik Date: Tue, 30 Apr 2024 11:09:08 +0530 Subject: [PATCH 25/27] Improvements in minimal template (#4119) This PR makes a few improvements in the docs for the minimal template. --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 13 +++ Cargo.toml | 1 + .../src/construct_runtime/expand/call.rs | 1 + .../procedural/src/construct_runtime/mod.rs | 2 + .../procedural/src/construct_runtime/parse.rs | 3 + .../src/pallet/expand/tt_default_parts.rs | 4 +- .../procedural/src/runtime/expand/mod.rs | 4 +- .../procedural/src/runtime/parse/pallet.rs | 4 + .../src/runtime/parse/pallet_decl.rs | 7 +- templates/minimal/Cargo.toml | 25 ++++++ templates/minimal/README.md | 13 +++ templates/minimal/runtime/src/lib.rs | 79 +++++++++++++++---- templates/minimal/src/lib.rs | 75 ++++++++++++++++++ 13 files changed, 210 insertions(+), 21 deletions(-) create mode 100644 templates/minimal/Cargo.toml create mode 100644 templates/minimal/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 67b0ad4def24..1fe4012070a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8474,6 +8474,19 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "minimal-template" +version = "0.0.0" +dependencies = [ + "docify", + "minimal-template-node", + "minimal-template-runtime", + "pallet-minimal-template", + "polkadot-sdk-docs", + "polkadot-sdk-frame", + "simple-mermaid", +] + [[package]] name = "minimal-template-node" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 42a6bc8abe1e..1d3f3d8e9ecd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -513,6 +513,7 @@ members = [ "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", + "templates/minimal", "templates/minimal/node", "templates/minimal/pallets/template", "templates/minimal/runtime", diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index b0041ccc0754..f055e8ce28e9 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -66,6 +66,7 @@ pub fn expand_outer_dispatch( quote! { #( #query_call_part_macros )* + /// The aggregated runtime call type. 
#[derive( Clone, PartialEq, Eq, #scrate::__private::codec::Encode, diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index b083abbb2a8d..1505d158895f 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -533,6 +533,7 @@ pub(crate) fn decl_all_pallets<'a>( for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; + let docs = &pallet_declaration.docs; let mut generics = vec![quote!(#runtime)]; generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name))); let mut attrs = Vec::new(); @@ -541,6 +542,7 @@ pub(crate) fn decl_all_pallets<'a>( attrs.extend(TokenStream2::from_str(&feat).expect("was parsed successfully; qed")); } let type_decl = quote!( + #( #[doc = #docs] )* #(#attrs)* pub type #type_name = #pallet::Pallet <#(#generics),*>; ); diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 31866c787b0f..ded77bed4c8e 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -605,6 +605,8 @@ pub struct Pallet { pub pallet_parts: Vec, /// Expressions specified inside of a #[cfg] attribute. pub cfg_pattern: Vec, + /// The doc literals + pub docs: Vec, } impl Pallet { @@ -774,6 +776,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?; diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 99364aaa96cd..1975f059152c 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -198,9 +198,9 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { macro_rules! #default_parts_unique_id_v2 { { $caller:tt - frame_support = [{ $($frame_support:ident)::* }] + your_tt_return = [{ $my_tt_return:path }] } => { - $($frame_support)*::__private::tt_return! { + $my_tt_return! { $caller tokens = [{ + Pallet #call_part_v2 #storage_part_v2 #event_part_v2 #error_part_v2 #origin_part_v2 #config_part_v2 diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs index 011f69f37147..43f11896808c 100644 --- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -93,7 +93,7 @@ fn construct_runtime_implicit_to_explicit( let frame_support = generate_access_from_frame_or_crate("frame-support")?; let attr = if legacy_ordering { quote!((legacy_ordering)) } else { quote!() }; let mut expansion = quote::quote!( - #[frame_support::runtime #attr] + #[#frame_support::runtime #attr] #input ); for pallet in definition.pallet_decls.iter() { @@ -103,7 +103,7 @@ fn construct_runtime_implicit_to_explicit( expansion = quote::quote!( #frame_support::__private::tt_call! { macro = [{ #pallet_path::tt_default_parts_v2 }] - frame_support = [{ #frame_support }] + your_tt_return = [{ #frame_support::__private::tt_return }] ~~> #frame_support::match_and_insert! 
{ target = [{ #expansion }] pattern = [{ #pallet_name = #pallet_path #pallet_instance }] diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index d2f1857fb2b4..09f5290541d3 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -16,6 +16,7 @@ // limitations under the License. use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}; +use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments}; @@ -86,6 +87,8 @@ impl Pallet { let cfg_pattern = vec![]; + let docs = get_doc_literals(&item.attrs); + Ok(Pallet { is_expanded: true, name, @@ -94,6 +97,7 @@ impl Pallet { instance, cfg_pattern, pallet_parts, + docs, }) } } diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs index 437a163cfbc4..e167d37d5f14 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs @@ -21,13 +21,14 @@ use syn::{spanned::Spanned, Attribute, Ident, PathArguments}; /// The declaration of a pallet. #[derive(Debug, Clone)] pub struct PalletDeclaration { - /// The name of the pallet, e.g.`System` in `System: frame_system`. + /// The name of the pallet, e.g.`System` in `pub type System = frame_system`. pub name: Ident, /// Optional attributes tagged right above a pallet declaration. pub attrs: Vec, - /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. + /// The path of the pallet, e.g. `frame_system` in `pub type System = frame_system`. pub path: syn::Path, - /// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::`. + /// The instance of the pallet, e.g. `Instance1` in `pub type Council = + /// pallet_collective`. pub instance: Option, } diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml new file mode 100644 index 000000000000..6cd28c5a4936 --- /dev/null +++ b/templates/minimal/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "minimal-template" +description = "A minimal template built with Substrate, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[dependencies] +minimal-template-node = { path = "./node" } +minimal-template-runtime = { path = "./runtime" } +pallet-minimal-template = { path = "./pallets/template" } +polkadot-sdk-docs = { path = "../../docs/sdk" } + +frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame" } + +# How we build docs in rust-docs +simple-mermaid = "0.1.1" +docify = "0.2.7" diff --git a/templates/minimal/README.md b/templates/minimal/README.md index e69de29bb2d1..0541e393db93 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -0,0 +1,13 @@ +# Minimal Template + +This is a minimal template for creating a blockchain using the Polkadot SDK. 
+ +# Docs + +You can generate and view the [Rust +Docs](https://doc.rust-lang.org/cargo/commands/cargo-doc.html) for this template +with this command: + +```sh +cargo doc -p minimal-template --open +``` diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index 794f30a054a8..d2debbf5689f 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! A minimal runtime that includes the template [`pallet`](`pallet_minimal_template`). + #![cfg_attr(not(feature = "std"), no_std)] // Make the WASM binary available. @@ -24,6 +26,7 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use frame::{ deps::frame_support::{ genesis_builder_helper::{build_state, get_preset}, + runtime, weights::{FixedFee, NoFee}, }, prelude::*, @@ -36,6 +39,7 @@ use frame::{ }, }; +/// The runtime version. #[runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("minimal-template-runtime"), @@ -54,61 +58,108 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// The signed extensions that are added to the runtime. type SignedExtra = ( + // Checks that the sender is not the zero address. frame_system::CheckNonZeroSender, + // Checks that the runtime version is correct. frame_system::CheckSpecVersion, + // Checks that the transaction version is correct. frame_system::CheckTxVersion, + // Checks that the genesis hash is correct. frame_system::CheckGenesis, + // Checks that the era is valid. frame_system::CheckEra, + // Checks that the nonce is valid. frame_system::CheckNonce, + // Checks that the weight is valid. frame_system::CheckWeight, + // Ensures that the sender has enough funds to pay for the transaction + // and deducts the fee from the sender's account. pallet_transaction_payment::ChargeTransactionPayment, ); -construct_runtime!( - pub enum Runtime { - System: frame_system, - Timestamp: pallet_timestamp, - - Balances: pallet_balances, - Sudo: pallet_sudo, - TransactionPayment: pallet_transaction_payment, - - // our local pallet - Template: pallet_minimal_template, - } -); +// Composes the runtime by adding all the used pallets and deriving necessary types. +#[runtime] +mod runtime { + /// The main runtime type. + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + /// Mandatory system pallet that should always be included in a FRAME runtime. + #[runtime::pallet_index(0)] + pub type System = frame_system; + + /// Provides a way for consensus systems to set and check the onchain time. + #[runtime::pallet_index(1)] + pub type Timestamp = pallet_timestamp; + + /// Provides the ability to keep track of balances. + #[runtime::pallet_index(2)] + pub type Balances = pallet_balances; + + /// Provides a way to execute privileged functions. + #[runtime::pallet_index(3)] + pub type Sudo = pallet_sudo; + + /// Provides the ability to charge for extrinsic execution. + #[runtime::pallet_index(4)] + pub type TransactionPayment = pallet_transaction_payment; + + /// A minimal pallet template. + #[runtime::pallet_index(5)] + pub type Template = pallet_minimal_template; +} parameter_types! 
{ pub const Version: RuntimeVersion = VERSION; } +/// Implements the types required for the system pallet. #[derive_impl(frame_system::config_preludes::SolochainDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type Version = Version; - type BlockHashCount = ConstU32<1024>; + // Use the account data from the balances pallet type AccountData = pallet_balances::AccountData<::Balance>; } +// Implements the types required for the balances pallet. #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type AccountStore = System; } +// Implements the types required for the sudo pallet. #[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig)] impl pallet_sudo::Config for Runtime {} +// Implements the types required for the sudo pallet. #[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)] impl pallet_timestamp::Config for Runtime {} +// Implements the types required for the transaction payment pallet. #[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)] impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter; + // Setting fee as independent of the weight of the extrinsic for demo purposes type WeightToFee = NoFee<::Balance>; + // Setting fee as fixed for any length of the call data for demo purposes type LengthToFee = FixedFee<1, ::Balance>; } +// Implements the types required for the template pallet. impl pallet_minimal_template::Config for Runtime {} type Block = frame::runtime::types_common::BlockOf; diff --git a/templates/minimal/src/lib.rs b/templates/minimal/src/lib.rs new file mode 100644 index 000000000000..68825d190bb2 --- /dev/null +++ b/templates/minimal/src/lib.rs @@ -0,0 +1,75 @@ +//! # Minimal Template +//! +//! This is a minimal template for creating a blockchain using the Polkadot SDK. +//! +//! ## Components +//! +//! The template consists of the following components: +//! +//! ### Node +//! +//! A minimal blockchain [`node`](`minimal_template_node`) that is capable of running a +//! runtime. It uses a simple chain specification, provides an option to choose Manual or +//! InstantSeal for consensus and exposes a few commands to interact with the node. +//! +//! ### Runtime +//! +//! A minimal [`runtime`](`minimal_template_runtime`) (or a state transition function) that +//! is capable of being run on the node. It is built using the [`FRAME`](`frame`) framework +//! that enables the composition of the core logic via separate modules called "pallets". +//! FRAME defines a complete DSL for building such pallets and the runtime itself. +//! +//! #### Transaction Fees +//! +//! The runtime charges a transaction fee for every transaction that is executed. The fee is +//! calculated based on the weight of the transaction (accouting for the execution time) and +//! length of the call data. Please refer to +//! [`benchmarking docs`](`polkadot_sdk_docs::reference_docs::frame_benchmarking_weight`) for +//! more information on how the weight is calculated. +//! +//! This template sets the fee as independent of the weight of the extrinsic and fixed for any +//! length of the call data for demo purposes. +//! +//! ### Pallet +//! +//! A minimal [`pallet`](`pallet_minimal_template`) that is built using FRAME. It is a unit of +//! encapsulated logic that has a clearly defined responsibility and can be linked to other pallets. +//! +//! ## Getting Started +//! +//! 
To get started with the template, follow the steps below: +//! +//! ### Build the Node +//! +//! Build the node using the following command: +//! +//! ```bash +//! cargo build -p minimal-template-node --release +//! ``` +//! +//! ### Run the Node +//! +//! Run the node using the following command: +//! +//! ```bash +//! ./target/release/minimal-template-node --dev +//! ``` +//! +//! ### CLI Options +//! +//! The node exposes a few options that can be used to interact with the node. To see the list of +//! available options, run the following command: +//! +//! ```bash +//! ./target/release/minimal-template-node --help +//! ``` +//! +//! #### Consensus Algorithm +//! +//! In order to run the node with a specific consensus algorithm, use the `--consensus` flag. For +//! example, to run the node with ManualSeal consensus with a block time of 5000ms, use the +//! following command: +//! +//! ```bash +//! ./target/release/minimal-template-node --dev --consensus manual-seal-5000 +//! ``` From b8593ccd1bddacc87a11559fe845db43b7f4ec6d Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 30 Apr 2024 16:42:50 +0300 Subject: [PATCH 26/27] BEEFY: Define basic fisherman (#4328) Related to https://github.com/paritytech/polkadot-sdk/pull/1903 For #1903 we will need to add a Fisherman struct. This PR: - defines a basic version of `Fisherman` and moves into it the logic that we have now for reporting double voting equivocations - splits the logic for generating the key ownership proofs into a more generic separate method - renames `EquivocationProof` to `DoubleVotingProof` since later we will introduce a new type of equivocation The PR doesn't contain any functional changes --- polkadot/node/service/src/fake_runtime_api.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- substrate/bin/node/runtime/src/lib.rs | 2 +- substrate/client/consensus/beefy/Cargo.toml | 2 +- .../client/consensus/beefy/src/fisherman.rs | 162 ++++++++++++++++++ substrate/client/consensus/beefy/src/lib.rs | 19 +- substrate/client/consensus/beefy/src/round.rs | 10 +- substrate/client/consensus/beefy/src/tests.rs | 6 +- .../client/consensus/beefy/src/worker.rs | 93 +++------- substrate/frame/beefy/src/equivocation.rs | 4 +- substrate/frame/beefy/src/lib.rs | 8 +- .../primitives/consensus/beefy/src/lib.rs | 8 +- .../consensus/beefy/src/test_utils.rs | 6 +- 15 files changed, 225 insertions(+), 103 deletions(-) create mode 100644 substrate/client/consensus/beefy/src/fisherman.rs diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index c6cfb7a27d04..89613040dca1 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -242,7 +242,7 @@ sp_api::impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - _: beefy_primitives::EquivocationProof< + _: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 1cfe9adfe13d..287ae9937da4 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2018,7 +2018,7 @@ sp_api::impl_runtime_apis! 
{ } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: beefy_primitives::EquivocationProof< + equivocation_proof: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index d0f1ff0035fc..87becf73cb74 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1009,7 +1009,7 @@ sp_api::impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - _equivocation_proof: beefy_primitives::EquivocationProof< + _equivocation_proof: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index de961bb4c398..7125f5d34c40 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1966,7 +1966,7 @@ sp_api::impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: beefy_primitives::EquivocationProof< + equivocation_proof: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 5d8016532a5d..18b0d0c31a4d 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -3053,7 +3053,7 @@ impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: sp_consensus_beefy::EquivocationProof< + equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 7b61b3c6c01f..435604a9473b 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -39,7 +39,6 @@ sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" } sp-core = { path = "../../../primitives/core" } sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } sp-keystore = { path = "../../../primitives/keystore" } -sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-runtime = { path = "../../../primitives/runtime" } tokio = "1.37" @@ -51,6 +50,7 @@ sc-block-builder = { path = "../../block-builder" } sc-network-test = { path = "../../network/test" } sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } sp-keyring = { path = "../../../primitives/keyring" } +sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-tracing = { path = "../../../primitives/tracing" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs new file mode 100644 index 000000000000..a2b4c8f945d1 --- /dev/null +++ b/substrate/client/consensus/beefy/src/fisherman.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{error::Error, keystore::BeefyKeystore, round::Rounds, LOG_TARGET}; +use log::{debug, error, warn}; +use sc_client_api::Backend; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_consensus_beefy::{ + check_equivocation_proof, + ecdsa_crypto::{AuthorityId, Signature}, + BeefyApi, BeefySignatureHasher, DoubleVotingProof, OpaqueKeyOwnershipProof, ValidatorSetId, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block, NumberFor}, +}; +use std::{marker::PhantomData, sync::Arc}; + +/// Helper struct containing the id and the key ownership proof for a validator. +pub struct ProvedValidator<'a> { + pub id: &'a AuthorityId, + pub key_owner_proof: OpaqueKeyOwnershipProof, +} + +/// Helper used to check and report equivocations. +pub struct Fisherman { + backend: Arc, + runtime: Arc, + key_store: Arc>, + + _phantom: PhantomData, +} + +impl, RuntimeApi: ProvideRuntimeApi> Fisherman +where + RuntimeApi::Api: BeefyApi, +{ + pub fn new( + backend: Arc, + runtime: Arc, + keystore: Arc>, + ) -> Self { + Self { backend, runtime, key_store: keystore, _phantom: Default::default() } + } + + fn prove_offenders<'a>( + &self, + at: BlockId, + offender_ids: impl Iterator, + validator_set_id: ValidatorSetId, + ) -> Result>, Error> { + let hash = match at { + BlockId::Hash(hash) => hash, + BlockId::Number(number) => self + .backend + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(number)) + .map_err(|err| { + Error::Backend(format!( + "Couldn't get hash for block #{:?} (error: {:?}). \ + Skipping report for equivocation", + at, err + )) + })?, + }; + + let runtime_api = self.runtime.runtime_api(); + let mut proved_offenders = vec![]; + for offender_id in offender_ids { + match runtime_api.generate_key_ownership_proof( + hash, + validator_set_id, + offender_id.clone(), + ) { + Ok(Some(key_owner_proof)) => { + proved_offenders.push(ProvedValidator { id: offender_id, key_owner_proof }); + }, + Ok(None) => { + debug!( + target: LOG_TARGET, + "🥩 Equivocation offender {} not part of the authority set {}.", + offender_id, validator_set_id + ); + }, + Err(e) => { + error!( + target: LOG_TARGET, + "🥩 Error generating key ownership proof for equivocation offender {} \ + in authority set {}: {}", + offender_id, validator_set_id, e + ); + }, + }; + } + + Ok(proved_offenders) + } + + /// Report the given equivocation to the BEEFY runtime module. This method + /// generates a session membership proof of the offender and then submits an + /// extrinsic to report the equivocation. In particular, the session membership + /// proof must be generated at the block at which the given set was active which + /// isn't necessarily the best block if there are pending authority set changes. 
+ pub fn report_double_voting( + &self, + proof: DoubleVotingProof, AuthorityId, Signature>, + active_rounds: &Rounds, + ) -> Result<(), Error> { + let (validators, validator_set_id) = + (active_rounds.validators(), active_rounds.validator_set_id()); + let offender_id = proof.offender_id(); + + if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { + debug!(target: LOG_TARGET, "🥩 Skipping report for bad equivocation {:?}", proof); + return Ok(()) + } + + if let Some(local_id) = self.key_store.authority_id(validators) { + if offender_id == &local_id { + warn!(target: LOG_TARGET, "🥩 Skipping report for own equivocation"); + return Ok(()) + } + } + + let key_owner_proofs = self.prove_offenders( + BlockId::Number(*proof.round_number()), + vec![offender_id].into_iter(), + validator_set_id, + )?; + + // submit equivocation report at **best** block + let best_block_hash = self.backend.blockchain().info().best_hash; + for ProvedValidator { key_owner_proof, .. } in key_owner_proofs { + self.runtime + .runtime_api() + .submit_report_equivocation_unsigned_extrinsic( + best_block_hash, + proof.clone(), + key_owner_proof, + ) + .map_err(Error::RuntimeApi)?; + } + + Ok(()) + } +} diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 2637481fbf3e..0e49839f0fd2 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -43,11 +43,10 @@ use sp_api::ProvideRuntimeApi; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_consensus_beefy::{ - ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, PayloadProvider, ValidatorSet, + ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, PayloadProvider, ValidatorSet, BEEFY_ENGINE_ID, }; use sp_keystore::KeystorePtr; -use sp_mmr_primitives::MmrApi; use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero}; use std::{ collections::{BTreeMap, VecDeque}, @@ -69,6 +68,7 @@ pub mod justification; use crate::{ communication::gossip::GossipValidator, + fisherman::Fisherman, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metrics::VoterMetrics, @@ -80,6 +80,7 @@ pub use communication::beefy_protocol_name::{ }; use sp_runtime::generic::OpaqueDigestItemId; +mod fisherman; #[cfg(test)] mod tests; @@ -305,14 +306,16 @@ where pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, is_authority: bool, ) -> BeefyWorker { + let key_store = Arc::new(self.key_store); BeefyWorker { - backend: self.backend, - runtime: self.runtime, - key_store: self.key_store, - metrics: self.metrics, - persisted_state: self.persisted_state, + backend: self.backend.clone(), + runtime: self.runtime.clone(), + key_store: key_store.clone(), payload_provider, sync, + fisherman: Arc::new(Fisherman::new(self.backend, self.runtime, key_store)), + metrics: self.metrics, + persisted_state: self.persisted_state, comms, links, pending_justifications, @@ -487,7 +490,7 @@ pub async fn start_beefy_gadget( C: Client + BlockBackend, P: PayloadProvider + Clone, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi>, + R::Api: BeefyApi, N: GossipNetwork + NetworkRequest + Send + Sync + 'static, S: GossipSyncing + SyncOracle + 'static, { diff --git a/substrate/client/consensus/beefy/src/round.rs b/substrate/client/consensus/beefy/src/round.rs index 0045dc70c260..5dae80cb1830 100644 --- a/substrate/client/consensus/beefy/src/round.rs +++ b/substrate/client/consensus/beefy/src/round.rs @@ 
-22,7 +22,7 @@ use codec::{Decode, Encode}; use log::{debug, info}; use sp_consensus_beefy::{ ecdsa_crypto::{AuthorityId, Signature}, - Commitment, EquivocationProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, + Commitment, DoubleVotingProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, }; use sp_runtime::traits::{Block, NumberFor}; use std::collections::BTreeMap; @@ -61,7 +61,7 @@ pub fn threshold(authorities: usize) -> usize { pub enum VoteImportResult { Ok, RoundConcluded(SignedCommitment, Signature>), - Equivocation(EquivocationProof, AuthorityId, Signature>), + DoubleVoting(DoubleVotingProof, AuthorityId, Signature>), Invalid, Stale, } @@ -153,7 +153,7 @@ where target: LOG_TARGET, "🥩 detected equivocated vote: 1st: {:?}, 2nd: {:?}", previous_vote, vote ); - return VoteImportResult::Equivocation(EquivocationProof { + return VoteImportResult::DoubleVoting(DoubleVotingProof { first: previous_vote.clone(), second: vote, }) @@ -207,7 +207,7 @@ mod tests { use sc_network_test::Block; use sp_consensus_beefy::{ - known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, EquivocationProof, Payload, + known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, DoubleVotingProof, Payload, SignedCommitment, ValidatorSet, VoteMessage, }; @@ -494,7 +494,7 @@ mod tests { let mut alice_vote2 = alice_vote1.clone(); alice_vote2.commitment = commitment2; - let expected_result = VoteImportResult::Equivocation(EquivocationProof { + let expected_result = VoteImportResult::DoubleVoting(DoubleVotingProof { first: alice_vote1.clone(), second: alice_vote2.clone(), }); diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 9b13d1da6d7d..2bb145d660df 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -59,7 +59,7 @@ use sp_consensus_beefy::{ known_payloads, mmr::{find_mmr_root_digest, MmrRootProvider}, test_utils::Keyring as BeefyKeyring, - BeefyApi, Commitment, ConsensusLog, EquivocationProof, MmrRootHash, OpaqueKeyOwnershipProof, + BeefyApi, Commitment, ConsensusLog, DoubleVotingProof, MmrRootHash, OpaqueKeyOwnershipProof, Payload, SignedCommitment, ValidatorSet, ValidatorSetId, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; @@ -259,7 +259,7 @@ pub(crate) struct TestApi { pub validator_set: Option, pub mmr_root_hash: MmrRootHash, pub reported_equivocations: - Option, AuthorityId, Signature>>>>>, + Option, AuthorityId, Signature>>>>>, } impl TestApi { @@ -313,7 +313,7 @@ sp_api::mock_impl_runtime_apis! 
{ } fn submit_report_equivocation_unsigned_extrinsic( - proof: EquivocationProof, AuthorityId, Signature>, + proof: DoubleVotingProof, AuthorityId, Signature>, _dummy: OpaqueKeyOwnershipProof, ) -> Option<()> { if let Some(equivocations_buf) = self.inner.reported_equivocations.as_ref() { diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 05575ae01c30..cfbb3d63aea4 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -23,6 +23,7 @@ use crate::{ }, error::Error, find_authorities_change, + fisherman::Fisherman, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, @@ -39,10 +40,9 @@ use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ - check_equivocation_proof, ecdsa_crypto::{AuthorityId, Signature}, - BeefyApi, BeefySignatureHasher, Commitment, EquivocationProof, PayloadProvider, ValidatorSet, - VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, + BeefyApi, Commitment, DoubleVotingProof, PayloadProvider, ValidatorSet, VersionedFinalityProof, + VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ generic::BlockId, @@ -377,9 +377,10 @@ pub(crate) struct BeefyWorker { // utilities pub backend: Arc, pub runtime: Arc, - pub key_store: BeefyKeystore, + pub key_store: Arc>, pub payload_provider: P, pub sync: Arc, + pub fisherman: Arc>, // communication (created once, but returned and reused if worker is restarted/reinitialized) pub comms: BeefyComms, @@ -590,9 +591,9 @@ where } metric_inc!(self.metrics, beefy_good_votes_processed); }, - VoteImportResult::Equivocation(proof) => { + VoteImportResult::DoubleVoting(proof) => { metric_inc!(self.metrics, beefy_equivocation_votes); - self.report_equivocation(proof)?; + self.report_double_voting(proof)?; }, VoteImportResult::Invalid => metric_inc!(self.metrics, beefy_invalid_votes), VoteImportResult::Stale => metric_inc!(self.metrics, beefy_stale_votes), @@ -941,64 +942,13 @@ where (error, self.comms) } - /// Report the given equivocation to the BEEFY runtime module. This method - /// generates a session membership proof of the offender and then submits an - /// extrinsic to report the equivocation. In particular, the session membership - /// proof must be generated at the block at which the given set was active which - /// isn't necessarily the best block if there are pending authority set changes. - pub(crate) fn report_equivocation( + /// Report the given equivocation to the BEEFY runtime module. 
+ fn report_double_voting( &self, - proof: EquivocationProof, AuthorityId, Signature>, + proof: DoubleVotingProof, AuthorityId, Signature>, ) -> Result<(), Error> { let rounds = self.persisted_state.voting_oracle.active_rounds()?; - let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); - let offender_id = proof.offender_id().clone(); - - if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { - debug!(target: LOG_TARGET, "🥩 Skip report for bad equivocation {:?}", proof); - return Ok(()) - } else if let Some(local_id) = self.key_store.authority_id(validators) { - if offender_id == local_id { - warn!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); - return Ok(()) - } - } - - let number = *proof.round_number(); - let hash = self - .backend - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(number)) - .map_err(|err| { - let err_msg = format!( - "Couldn't get hash for block #{:?} (error: {:?}), skipping report for equivocation", - number, err - ); - Error::Backend(err_msg) - })?; - let runtime_api = self.runtime.runtime_api(); - // generate key ownership proof at that block - let key_owner_proof = match runtime_api - .generate_key_ownership_proof(hash, validator_set_id, offender_id) - .map_err(Error::RuntimeApi)? - { - Some(proof) => proof, - None => { - debug!( - target: LOG_TARGET, - "🥩 Equivocation offender not part of the authority set." - ); - return Ok(()) - }, - }; - - // submit equivocation report at **best** block - let best_block_hash = self.backend.blockchain().info().best_hash; - runtime_api - .submit_report_equivocation_unsigned_extrinsic(best_block_hash, proof, key_owner_proof) - .map_err(Error::RuntimeApi)?; - - Ok(()) + self.fisherman.report_double_voting(proof, rounds) } } @@ -1165,13 +1115,15 @@ pub(crate) mod tests { .unwrap(); let payload_provider = MmrRootProvider::new(api.clone()); let comms = BeefyComms { gossip_engine, gossip_validator, on_demand_justifications }; + let key_store: Arc> = Arc::new(Some(keystore).into()); BeefyWorker { - backend, - runtime: api, - key_store: Some(keystore).into(), + backend: backend.clone(), + runtime: api.clone(), + key_store: key_store.clone(), metrics, payload_provider, sync: Arc::new(sync), + fisherman: Arc::new(Fisherman::new(backend, api, key_store)), links, comms, pending_justifications: BTreeMap::new(), @@ -1590,6 +1542,11 @@ pub(crate) mod tests { let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); worker.runtime = api_alice.clone(); + worker.fisherman = Arc::new(Fisherman::new( + worker.backend.clone(), + worker.runtime.clone(), + worker.key_store.clone(), + )); // let there be a block with num = 1: let _ = net.peer(0).push_blocks(1, false); @@ -1604,7 +1561,7 @@ pub(crate) mod tests { ); { // expect voter (Alice) to successfully report it - assert_eq!(worker.report_equivocation(good_proof.clone()), Ok(())); + assert_eq!(worker.report_double_voting(good_proof.clone()), Ok(())); // verify Alice reports Bob equivocation to runtime let reported = api_alice.reported_equivocations.as_ref().unwrap().lock(); assert_eq!(reported.len(), 1); @@ -1616,7 +1573,7 @@ pub(crate) mod tests { let mut bad_proof = good_proof.clone(); bad_proof.first.id = Keyring::Charlie.public(); // bad proofs are simply ignored - assert_eq!(worker.report_equivocation(bad_proof), Ok(())); + assert_eq!(worker.report_double_voting(bad_proof), Ok(())); // verify nothing reported to runtime 
assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); @@ -1625,7 +1582,7 @@ pub(crate) mod tests { old_proof.first.commitment.validator_set_id = 0; old_proof.second.commitment.validator_set_id = 0; // old proofs are simply ignored - assert_eq!(worker.report_equivocation(old_proof), Ok(())); + assert_eq!(worker.report_double_voting(old_proof), Ok(())); // verify nothing reported to runtime assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); @@ -1635,7 +1592,7 @@ pub(crate) mod tests { (block_num, payload2.clone(), set_id, &Keyring::Alice), ); // equivocations done by 'self' are simply ignored (not reported) - assert_eq!(worker.report_equivocation(self_proof), Ok(())); + assert_eq!(worker.report_double_voting(self_proof), Ok(())); // verify nothing reported to runtime assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); } diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index bbc6eae6af29..aecc9e721d5c 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -38,7 +38,7 @@ use codec::{self as codec, Decode, Encode}; use frame_support::traits::{Get, KeyOwnerProofSystem}; use frame_system::pallet_prelude::BlockNumberFor; use log::{error, info}; -use sp_consensus_beefy::{EquivocationProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE}; +use sp_consensus_beefy::{DoubleVotingProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE}; use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -123,7 +123,7 @@ pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, /// Equivocation evidence convenience alias. pub type EquivocationEvidenceFor = ( - EquivocationProof< + DoubleVotingProof< BlockNumberFor, ::BeefyId, <::BeefyId as RuntimeAppPublic>::Signature, diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 09cd13ab70a4..63f3e9bb309c 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -41,7 +41,7 @@ use sp_staking::{offence::OffenceReportSystem, SessionIndex}; use sp_std::prelude::*; use sp_consensus_beefy::{ - AuthorityIndex, BeefyAuthorityId, ConsensusLog, EquivocationProof, OnNewValidatorSet, + AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, OnNewValidatorSet, ValidatorSet, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, }; @@ -210,7 +210,7 @@ pub mod pallet { pub fn report_equivocation( origin: OriginFor, equivocation_proof: Box< - EquivocationProof< + DoubleVotingProof< BlockNumberFor, T::BeefyId, ::Signature, @@ -245,7 +245,7 @@ pub mod pallet { pub fn report_equivocation_unsigned( origin: OriginFor, equivocation_proof: Box< - EquivocationProof< + DoubleVotingProof< BlockNumberFor, T::BeefyId, ::Signature, @@ -368,7 +368,7 @@ impl Pallet { /// an unsigned extrinsic with a call to `report_equivocation_unsigned` and /// will push the transaction to the pool. Only useful in an offchain context. 
pub fn submit_unsigned_equivocation_report( - equivocation_proof: EquivocationProof< + equivocation_proof: DoubleVotingProof< BlockNumberFor, T::BeefyId, ::Signature, diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 6f644c5f790d..390c0ff71273 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -306,14 +306,14 @@ pub struct VoteMessage { /// BEEFY happens when a voter votes on the same round/block for different payloads. /// Proving is achieved by collecting the signed commitments of conflicting votes. #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] -pub struct EquivocationProof { +pub struct DoubleVotingProof { /// The first vote in the equivocation. pub first: VoteMessage, /// The second vote in the equivocation. pub second: VoteMessage, } -impl EquivocationProof { +impl DoubleVotingProof { /// Returns the authority id of the equivocator. pub fn offender_id(&self) -> &Id { &self.first.id @@ -347,7 +347,7 @@ where /// Verifies the equivocation proof by making sure that both votes target /// different blocks and that its signatures are valid. pub fn check_equivocation_proof( - report: &EquivocationProof::Signature>, + report: &DoubleVotingProof::Signature>, ) -> bool where Id: BeefyAuthorityId + PartialEq, @@ -437,7 +437,7 @@ sp_api::decl_runtime_apis! { /// hardcoded to return `None`). Only useful in an offchain context. fn submit_report_equivocation_unsigned_extrinsic( equivocation_proof: - EquivocationProof, AuthorityId, ::Signature>, + DoubleVotingProof, AuthorityId, ::Signature>, key_owner_proof: OpaqueKeyOwnershipProof, ) -> Option<()>; diff --git a/substrate/primitives/consensus/beefy/src/test_utils.rs b/substrate/primitives/consensus/beefy/src/test_utils.rs index ec13c9c69004..d7fd49214f12 100644 --- a/substrate/primitives/consensus/beefy/src/test_utils.rs +++ b/substrate/primitives/consensus/beefy/src/test_utils.rs @@ -18,7 +18,7 @@ #[cfg(feature = "bls-experimental")] use crate::ecdsa_bls_crypto; use crate::{ - ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, EquivocationProof, Payload, + ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, DoubleVotingProof, Payload, ValidatorSetId, VoteMessage, }; use sp_application_crypto::{AppCrypto, AppPair, RuntimeAppPublic, Wraps}; @@ -140,7 +140,7 @@ impl From> for ecdsa_crypto::Public { pub fn generate_equivocation_proof( vote1: (u64, Payload, ValidatorSetId, &Keyring), vote2: (u64, Payload, ValidatorSetId, &Keyring), -) -> EquivocationProof { +) -> DoubleVotingProof { let signed_vote = |block_number: u64, payload: Payload, validator_set_id: ValidatorSetId, @@ -151,5 +151,5 @@ pub fn generate_equivocation_proof( }; let first = signed_vote(vote1.0, vote1.1, vote1.2, vote1.3); let second = signed_vote(vote2.0, vote2.1, vote2.2, vote2.3); - EquivocationProof { first, second } + DoubleVotingProof { first, second } } From c973fe86f8c668462186c95655a58fda04508e9a Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 30 Apr 2024 16:29:14 +0200 Subject: [PATCH 27/27] Contracts: revert reverted changes from 4266 (#4277) revert some reverted changes from #4266 --- .../src/parachain/contracts_config.rs | 14 ++------ .../frame/contracts/mock-network/src/tests.rs | 25 +------------- substrate/frame/contracts/src/lib.rs | 3 ++ substrate/frame/contracts/src/wasm/runtime.rs | 33 ++----------------- 4 files changed, 9 insertions(+), 66 deletions(-) diff --git 
From c973fe86f8c668462186c95655a58fda04508e9a Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Tue, 30 Apr 2024 16:29:14 +0200
Subject: [PATCH 27/27] Contracts: revert reverted changes from 4266 (#4277)

revert some reverted changes from #4266
---
 .../src/parachain/contracts_config.rs         | 14 ++------
 .../frame/contracts/mock-network/src/tests.rs | 25 +-------------
 substrate/frame/contracts/src/lib.rs          |  3 ++
 substrate/frame/contracts/src/wasm/runtime.rs | 33 ++-----------------
 4 files changed, 9 insertions(+), 66 deletions(-)

diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs
index 20fdd9a243d1..bf3c00b3ff1f 100644
--- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs
+++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs
@@ -14,8 +14,9 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see .
 
-use super::{Balances, Runtime, RuntimeCall, RuntimeEvent, RuntimeHoldReason};
-use frame_support::{derive_impl, parameter_types, traits::Contains};
+use super::{Balances, Runtime, RuntimeCall, RuntimeEvent};
+use crate::parachain::RuntimeHoldReason;
+use frame_support::{derive_impl, parameter_types};
 
 parameter_types! {
 	pub Schedule: pallet_contracts::Schedule = Default::default();
@@ -28,14 +29,5 @@ impl pallet_contracts::Config for Runtime {
 	type Currency = Balances;
 	type Schedule = Schedule;
 	type Time = super::Timestamp;
-	type CallFilter = CallFilter;
 	type Xcm = pallet_xcm::Pallet;
 }
-
-/// In this mock, we only allow other contract calls via XCM.
-pub struct CallFilter;
-impl Contains for CallFilter {
-	fn contains(call: &RuntimeCall) -> bool {
-		matches!(call, RuntimeCall::Contracts(pallet_contracts::Call::call { .. }))
-	}
-}
diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs
index e7d1f6279aa3..48a94e172a02 100644
--- a/substrate/frame/contracts/mock-network/src/tests.rs
+++ b/substrate/frame/contracts/mock-network/src/tests.rs
@@ -22,10 +22,7 @@ use crate::{
 	relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE,
 };
 use codec::{Decode, Encode};
-use frame_support::{
-	assert_err,
-	traits::{fungibles::Mutate, Currency},
-};
+use frame_support::traits::{fungibles::Mutate, Currency};
 use pallet_contracts::{test_utils::builder::*, Code};
 use pallet_contracts_fixtures::compile_module;
 use pallet_contracts_uapi::ReturnErrorCode;
@@ -132,26 +129,6 @@ fn test_xcm_execute_incomplete() {
 	});
 }
 
-#[test]
-fn test_xcm_execute_filtered_call() {
-	MockNet::reset();
-
-	let contract_addr = instantiate_test_contract("xcm_execute");
-
-	ParaA::execute_with(|| {
-		// `remark` should be rejected, as it is not allowed by our CallFilter.
-		let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] });
-		let message: Xcm = Xcm::builder_unsafe()
-			.transact(OriginKind::Native, Weight::MAX, call.encode())
-			.build();
-		let result = bare_call(contract_addr.clone())
-			.data(VersionedXcm::V4(message).encode())
-			.build()
-			.result;
-		assert_err!(result, frame_system::Error::::CallFiltered);
-	});
-}
-
 #[test]
 fn test_xcm_execute_reentrant_call() {
 	MockNet::reset();
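The mock runtime above deliberately drops its `CallFilter` because the filter no longer guards XCM programs (see the `lib.rs` doc change in the next hunk). Runtimes that still want to restrict which dispatchables a contract may call directly keep doing so through `pallet_contracts::Config::CallFilter`. A minimal sketch along the lines of the removed mock filter, where `RuntimeCall` stands for the concrete runtime's outer call enum and the filter name is illustrative, not part of this patch:

use frame_support::traits::Contains;

/// Illustrative restrictive filter: only nested contract calls may be dispatched
/// by a contract; every other runtime call is rejected.
pub struct OnlyContractCalls;

impl Contains<RuntimeCall> for OnlyContractCalls {
	fn contains(call: &RuntimeCall) -> bool {
		matches!(call, RuntimeCall::Contracts(pallet_contracts::Call::call { .. }))
	}
}

// Wired up in the runtime's pallet_contracts::Config impl:
// type CallFilter = OnlyContractCalls;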
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index 0045d72141c9..3e87eb9f37ea 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -307,6 +307,9 @@ pub mod pallet {
 		/// Therefore please make sure to be restrictive about which dispatchables are allowed
 		/// in order to not introduce a new DoS vector like memory allocation patterns that can
 		/// be exploited to drive the runtime into a panic.
+		///
+		/// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact
+		/// calls, you must configure them separately within the XCM pallet itself.
 		#[pallet::no_default_bounds]
 		type CallFilter: Contains<::RuntimeCall>;
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs
index 52ceda99edb7..3212aff31269 100644
--- a/substrate/frame/contracts/src/wasm/runtime.rs
+++ b/substrate/frame/contracts/src/wasm/runtime.rs
@@ -25,12 +25,8 @@ use crate::{
 };
 use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
 use frame_support::{
-	dispatch::DispatchInfo,
-	ensure,
-	pallet_prelude::{DispatchResult, DispatchResultWithPostInfo},
-	parameter_types,
-	traits::Get,
-	weights::Weight,
+	dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types,
+	traits::Get, weights::Weight,
 };
 use pallet_contracts_proc_macro::define_env;
 use pallet_contracts_uapi::{CallFlags, ReturnFlags};
@@ -41,7 +37,6 @@ use sp_runtime::{
 };
 use sp_std::{fmt, prelude::*};
 use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store};
-use xcm::VersionedXcm;
 
 type CallOf = ::RuntimeCall;
@@ -378,29 +373,6 @@ fn already_charged(_: u32) -> Option {
 	None
 }
 
-/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`]
-/// instruction with a call that is not allowed by the CallFilter.
-fn ensure_executable(message: &VersionedXcm>) -> DispatchResult {
-	use frame_support::traits::Contains;
-	use xcm::prelude::{Transact, Xcm};
-
-	let mut message: Xcm> =
-		message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?;
-
-	message.iter_mut().try_for_each(|inst| -> DispatchResult {
-		let Transact { ref mut call, .. } = inst else { return Ok(()) };
-		let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?;
-
-		if !::CallFilter::contains(call) {
-			return Err(frame_system::Error::::CallFiltered.into())
-		}
-
-		Ok(())
-	})?;
-
-	Ok(())
-}
-
 /// Can only be used for one call.
 pub struct Runtime<'a, E: Ext + 'a> {
 	ext: &'a mut E,
@@ -2117,7 +2089,6 @@ pub mod env {
 		ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
 		let message: VersionedXcm> =
 			ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;
-		ensure_executable::(&message)?;
 		let execute_weight =
 			<::Xcm as ExecuteController<_, _>>::WeightInfo::execute();
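With `ensure_executable` removed, `xcm_execute` no longer consults the contracts `CallFilter`, so any restriction on calls reachable through XCM `Transact` has to live in the runtime's XCM configuration instead, as the new `CallFilter` doc comment above states. In runtimes built on `xcm-executor`, one common knob for such an allow-list is the executor's `SafeCallFilter` associated type; the sketch below is only an illustration under that assumption, and the filter name and allowed call are not part of this patch:

use frame_support::traits::Contains;

/// Illustrative allow-list for calls that XCM `Transact` may dispatch.
pub struct AllowedTransactCalls;

impl Contains<RuntimeCall> for AllowedTransactCalls {
	fn contains(call: &RuntimeCall) -> bool {
		// Only permit system remarks via Transact in this example.
		matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. }))
	}
}

// In the runtime's xcm_executor::Config implementation (illustrative):
// type SafeCallFilter = AllowedTransactCalls;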