diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4d17b70028810..a0b1464bae23e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -48,11 +48,11 @@ variables: CARGO_INCREMENTAL: 0 DOCKER_OS: "debian:stretch" ARCH: "x86_64" - # staging image with rust 1.65 and nightly-2022-11-16 - CI_IMAGE: "paritytech/ci-linux@sha256:786869e731963b3cc0a4aa9deb83367ed9e87a6ae48b6eb029d62b0cab4d87c1" + CI_IMAGE: "paritytech/ci-linux:production" BUILDAH_IMAGE: "quay.io/buildah/stable:v1.27" RUSTY_CACHIER_SINGLE_BRANCH: master RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true" + RUSTY_CACHIER_COMPRESSION_METHOD: zstd ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.22" default: diff --git a/Cargo.lock b/Cargo.lock index c8d1d8d83f0d7..d333b79c8c403 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -187,9 +187,9 @@ checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" [[package]] name = "arc-swap" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "array-bytes" @@ -1022,9 +1022,9 @@ dependencies = [ [[package]] name = "comfy-table" -version = "6.1.3" +version = "6.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e621e7e86c46fd8a14c32c6ae3cb95656621b4743a27d0cffedb831d46e7ad21" +checksum = "6e7b787b0dc42e8111badfdbe4c3059158ccb2db8780352fa1b01e8ccf45cc4d" dependencies = [ "strum", "strum_macros", @@ -3217,7 +3217,7 @@ checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" dependencies = [ "hermit-abi 0.2.6", "io-lifetimes 1.0.3", - "rustix 0.36.5", + "rustix 0.36.6", "windows-sys 0.42.0", ] @@ -3458,6 +3458,7 @@ dependencies = [ "pallet-message-queue", "pallet-mmr", "pallet-multisig", + "pallet-nfts", "pallet-nis", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", @@ 
-3706,9 +3707,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ee545eedf4f88502b2a4a2323405c3225d212d643212b0615856ca227fb9c3" +checksum = "2766dcd2be8c87d5e1f35487deb22d765f49c6ae1251b3633efe3b25698bd3d2" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -4272,7 +4273,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" dependencies = [ - "rustix 0.36.5", + "rustix 0.36.6", ] [[package]] @@ -4981,9 +4982,9 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "7.1.1" +version = "7.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" dependencies = [ "memchr", "minimal-lexical", @@ -5118,9 +5119,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" [[package]] name = "oorandom" @@ -5897,6 +5898,24 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-nfts" +version = "4.0.0-dev" +dependencies = [ + "enumflags2", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-nicks" version = "4.0.0-dev" @@ -6788,9 +6807,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.1" +version = "2.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0" +checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" dependencies = [ "thiserror", "ucd-trie", @@ -6798,9 +6817,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdc078600d06ff90d4ed238f0119d84ab5d43dbaad278b0e33a8820293b32344" +checksum = "96504449aa860c8dcde14f9fba5c58dc6658688ca1fe363589d6327b8662c603" dependencies = [ "pest", "pest_generator", @@ -6808,9 +6827,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a1af60b1c4148bb269006a750cff8e2ea36aff34d2d96cf7be0b14d1bed23c" +checksum = "798e0220d1111ae63d66cb66a5dcb3fc2d986d520b98e49e1852bfdb11d7c5e7" dependencies = [ "pest", "pest_meta", @@ -6821,9 +6840,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fec8605d59fc2ae0c6c1aefc0c7c7a9769732017c0ce07f7a9cfffa7b4404f20" +checksum = "984298b75898e30a843e278a9f2452c31e349a073a0ce6fd950a12a74464e065" dependencies = [ "once_cell", "pest", @@ -6990,9 +7009,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.4" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54fc5dc63ed3bbf19494623db4f3af16842c0d975818e469022d09e53f0aa05" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", @@ -7668,9 +7687,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.5" +version = "0.36.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a3807b5d10909833d3e9acd1eb5fb988f79376ff10fce42937de71a449c4c588" +checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" dependencies = [ "bitflags", "errno", @@ -7916,6 +7935,7 @@ dependencies = [ "sp-keystore", "sp-panic-handler", "sp-runtime", + "sp-tracing", "sp-version", "tempfile", "thiserror", @@ -8416,6 +8436,7 @@ dependencies = [ "assert_matches", "async-trait", "asynchronous-codec", + "backtrace", "bytes", "either", "fnv", @@ -8627,6 +8648,7 @@ dependencies = [ "pin-project", "sc-network-common", "sc-peerset", + "sc-utils", "sp-consensus", "sp-runtime", "substrate-prometheus-endpoint", @@ -8963,6 +8985,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project", "rand 0.8.5", + "sc-utils", "serde", "serde_json", "thiserror", @@ -9061,6 +9084,7 @@ dependencies = [ name = "sc-utils" version = "4.0.0-dev" dependencies = [ + "backtrace", "futures", "futures-timer", "lazy_static", @@ -9267,9 +9291,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.151" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fed41fc1a24994d044e6db6935e69511a1153b52c15eb42493b26fa87feba0" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] @@ -9286,9 +9310,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.151" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "255abe9a125a985c05190d687b320c12f9b1f0b99445e608c21ba0782c719ad8" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -11344,6 +11368,7 @@ dependencies = [ "sc-executor", "sc-service", "serde", + "serde_json", "sp-api", "sp-core", "sp-debug-derive", diff --git a/Cargo.toml b/Cargo.toml index eb78d5e104486..8f55d8e527ecd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -122,6 +122,7 @@ 
members = [ "frame/preimage", "frame/proxy", "frame/message-queue", + "frame/nfts", "frame/nomination-pools", "frame/nomination-pools/fuzzer", "frame/nomination-pools/benchmarking", diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 358c09779d59a..c8f9e2b3d3c82 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -161,6 +161,7 @@ pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) { let line = line.expect("failed to obtain next line from stdout for WS address discovery"); data.push_str(&line); + data.push_str("\n"); // does the line contain our port (we expect this specific output from substrate). let sock_addr = match line.split_once("Running JSON-RPC WS server: addr=") { @@ -170,7 +171,10 @@ pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) { Some(format!("ws://{}", sock_addr)) }) - .expect("We should get a WebSocket address"); + .unwrap_or_else(|| { + eprintln!("Observed node output:\n{}", data); + panic!("We should get a WebSocket address") + }); (ws_url, data) } diff --git a/bin/node/cli/tests/remember_state_pruning_works.rs b/bin/node/cli/tests/remember_state_pruning_works.rs new file mode 100644 index 0000000000000..5b8e34cc7a00d --- /dev/null +++ b/bin/node/cli/tests/remember_state_pruning_works.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use tempfile::tempdir; + +pub mod common; + +#[tokio::test] +#[cfg(unix)] +async fn remember_state_pruning_works() { + let base_path = tempdir().expect("could not create a temp dir"); + + // First run with `--state-pruning=archive`. + common::run_node_for_a_while( + base_path.path(), + &["--dev", "--state-pruning=archive", "--no-hardware-benchmarks"], + ) + .await; + + // Then run again without specifying the state pruning. + // This should load state pruning settings from the db. + common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; +} diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index c88703b929e6b..02b2a8787b5d5 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -30,7 +30,7 @@ use sp_runtime::{ use kitchensink_runtime::{ constants::{currency::*, time::SLOT_DURATION}, Balances, CheckedExtrinsic, Header, Runtime, RuntimeCall, RuntimeEvent, System, - TransactionPayment, UncheckedExtrinsic, + TransactionPayment, Treasury, UncheckedExtrinsic, }; use node_primitives::{Balance, Hash}; use node_testing::keyring::*; @@ -398,6 +398,7 @@ fn full_native_block_import_works() { }); fees = t.execute_with(|| transfer_fee(&xt())); + let pot = t.execute_with(|| Treasury::pot()); executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); @@ -408,6 +409,14 @@ fn full_native_block_import_works() { ); assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees); let events = vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Treasury(pallet_treasury::Event::UpdatedInactive { + reactivated: 0, + deactivated: pot, + }), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(0), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { 
diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 477545c9ac332..201e3a85f8941 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -78,6 +78,7 @@ pallet-membership = { version = "4.0.0-dev", default-features = false, path = ". pallet-message-queue = { version = "7.0.0-dev", default-features = false, path = "../../../frame/message-queue" } pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../../frame/merkle-mountain-range" } pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } +pallet-nfts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/nfts" } pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../../../frame/nomination-pools"} pallet-nomination-pools-benchmarking = { version = "1.0.0", default-features = false, optional = true, path = "../../../frame/nomination-pools/benchmarking" } pallet-nomination-pools-runtime-api = { version = "1.0.0-dev", default-features = false, path = "../../../frame/nomination-pools/runtime-api" } @@ -197,6 +198,7 @@ std = [ "pallet-root-testing/std", "pallet-recovery/std", "pallet-uniques/std", + "pallet-nfts/std", "pallet-vesting/std", "log/std", "frame-try-runtime?/std", @@ -253,6 +255,7 @@ runtime-benchmarks = [ "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", + "pallet-nfts/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-whitelist/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", @@ -312,6 +315,7 @@ try-runtime = [ "pallet-asset-tx-payment/try-runtime", "pallet-transaction-storage/try-runtime", "pallet-uniques/try-runtime", + "pallet-nfts/try-runtime", "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", ] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e5776e3fd692c..0c946fa180f20 100644 --- 
a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -56,6 +56,7 @@ use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; use pallet_session::historical::{self as pallet_session_historical}; pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; @@ -301,6 +302,7 @@ impl InstanceFilter for ProxyType { RuntimeCall::Balances(..) | RuntimeCall::Assets(..) | RuntimeCall::Uniques(..) | + RuntimeCall::Nfts(..) | RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { .. }) | RuntimeCall::Indices(pallet_indices::Call::transfer { .. }) ), @@ -565,7 +567,7 @@ impl pallet_staking::Config for Runtime { type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; /// A super-majority of the council can cancel the slash. - type SlashCancelOrigin = EitherOfDiverse< + type AdminOrigin = EitherOfDiverse< EnsureRoot, pallet_collective::EnsureProportionAtLeast, >; @@ -1528,6 +1530,10 @@ parameter_types! { pub const ItemDeposit: Balance = 1 * DOLLARS; pub const KeyLimit: u32 = 32; pub const ValueLimit: u32 = 256; + pub const ApprovalsLimit: u32 = 20; + pub const ItemAttributesApprovalsLimit: u32 = 20; + pub const MaxTips: u32 = 10; + pub const MaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; } impl pallet_uniques::Config for Runtime { @@ -1551,6 +1557,36 @@ impl pallet_uniques::Config for Runtime { type Locker = (); } +parameter_types! 
{ + pub Features: PalletFeatures = PalletFeatures::all_enabled(); +} + +impl pallet_nfts::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type ForceOrigin = frame_system::EnsureRoot; + type CollectionDeposit = CollectionDeposit; + type ItemDeposit = ItemDeposit; + type MetadataDepositBase = MetadataDepositBase; + type AttributeDepositBase = MetadataDepositBase; + type DepositPerByte = MetadataDepositPerByte; + type StringLimit = StringLimit; + type KeyLimit = KeyLimit; + type ValueLimit = ValueLimit; + type ApprovalsLimit = ApprovalsLimit; + type ItemAttributesApprovalsLimit = ItemAttributesApprovalsLimit; + type MaxTips = MaxTips; + type MaxDeadlineDuration = MaxDeadlineDuration; + type Features = Features; + type WeightInfo = pallet_nfts::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type Locker = (); +} + impl pallet_transaction_storage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; @@ -1705,6 +1741,7 @@ construct_runtime!( Lottery: pallet_lottery, Nis: pallet_nis, Uniques: pallet_uniques, + Nfts: pallet_nfts, TransactionStorage: pallet_transaction_storage, VoterList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, @@ -1836,6 +1873,7 @@ mod benches { [pallet_transaction_storage, TransactionStorage] [pallet_treasury, Treasury] [pallet_uniques, Uniques] + [pallet_nfts, Nfts] [pallet_utility, Utility] [pallet_vesting, Vesting] [pallet_whitelist, Whitelist] diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 05e3163dcc7bd..0d00257fa7b06 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -21,7 +21,7 @@ use sp_consensus::BlockOrigin; use sp_core::storage::StorageKey; use sp_runtime::{ - generic::{BlockId, SignedBlock}, + generic::SignedBlock, traits::{Block as BlockT, NumberFor}, Justifications, }; @@ 
-120,14 +120,13 @@ pub trait BlockBackend { /// that are indexed by the runtime with `storage_index_transaction`. fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result>>>; - /// Get full block by id. - fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; + /// Get full block by hash. + fn block(&self, hash: Block::Hash) -> sp_blockchain::Result>>; - /// Get block status. - fn block_status(&self, id: &BlockId) - -> sp_blockchain::Result; + /// Get block status by block hash. + fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result; - /// Get block justifications for the block with the given id. + /// Get block justifications for the block with the given hash. fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result>; /// Get block hash by number. diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index 9fcc381f9697e..4dd23581d2622 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -144,7 +144,9 @@ impl StorageNotifications { filter_keys: Option<&[StorageKey]>, filter_child_keys: Option<&[(StorageKey, Option>)]>, ) -> StorageEventStream { - let receiver = self.0.subscribe(registry::SubscribeOp { filter_keys, filter_child_keys }); + let receiver = self + .0 + .subscribe(registry::SubscribeOp { filter_keys, filter_child_keys }, 100_000); StorageEventStream(receiver) } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 59a133b86214e..4fea98c6eb24e 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -120,7 +120,7 @@ where ) -> Result { let beefy_best_block = Arc::new(RwLock::new(None)); - let stream = best_block_stream.subscribe(); + let stream = best_block_stream.subscribe(100_000); let closure_clone = beefy_best_block.clone(); let future = stream.for_each(move |best_beefy| { let async_clone = closure_clone.clone(); @@ -141,7 +141,7 @@ where fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> 
SubscriptionResult { let stream = self .finality_proof_stream - .subscribe() + .subscribe(100_000) .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); let fut = async move { diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 35f7cac55a964..5b6531822a0a1 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -265,7 +265,7 @@ where // Subscribe to finality notifications and justifications before waiting for runtime pallet and // reuse the streams, so we don't miss notifications while waiting for pallet to be available. let mut finality_notifications = client.finality_notification_stream().fuse(); - let block_import_justif = links.from_block_import_justif_stream.subscribe().fuse(); + let block_import_justif = links.from_block_import_justif_stream.subscribe(100_000).fuse(); // Wait for BEEFY pallet to be active before starting voter. let persisted_state = diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index ce6f6ae3c978a..66d3a210d7eba 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -430,8 +430,8 @@ pub(crate) fn get_beefy_streams( let beefy_rpc_links = net.peer(index).data.beefy_rpc_links.lock().clone().unwrap(); let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = beefy_rpc_links; - best_block_streams.push(from_voter_best_beefy_stream.subscribe()); - versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); + best_block_streams.push(from_voter_best_beefy_stream.subscribe(100_000)); + versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe(100_000)); }); (best_block_streams, versioned_finality_proof_streams) } @@ -736,7 +736,7 @@ async fn beefy_importing_blocks() { let hashof1 = block.header.hash(); // Import without justifications. 
- let mut justif_recv = justif_stream.subscribe(); + let mut justif_recv = justif_stream.subscribe(100_000); assert_eq!( block_import .import_block(params(block.clone(), None), HashMap::new()) @@ -779,7 +779,7 @@ async fn beefy_importing_blocks() { let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; let hashof2 = block.header.hash(); - let mut justif_recv = justif_stream.subscribe(); + let mut justif_recv = justif_stream.subscribe(100_000); assert_eq!( block_import.import_block(params(block, justif), HashMap::new()).await.unwrap(), ImportResult::Imported(ImportedAux { @@ -823,7 +823,7 @@ async fn beefy_importing_blocks() { let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; let hashof3 = block.header.hash(); - let mut justif_recv = justif_stream.subscribe(); + let mut justif_recv = justif_stream.subscribe(100_000); assert_eq!( block_import.import_block(params(block, justif), HashMap::new()).await.unwrap(), ImportResult::Imported(ImportedAux { diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 91b0745330698..5e1f2bab4be9f 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -50,6 +50,7 @@ sp-version = { version = "5.0.0", path = "../../primitives/version" } [dev-dependencies] tempfile = "3.1.0" futures-timer = "3.0.1" +sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } [features] default = ["rocksdb"] diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index e2f83200e511c..76fedff4d3ad7 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -23,7 +23,7 @@ use crate::{ }; use clap::Parser; use log::info; -use sc_client_api::{BlockBackend, UsageProvider}; +use sc_client_api::{BlockBackend, HeaderBackend, UsageProvider}; use 
sc_service::{chain_ops::export_blocks, config::DatabaseSource}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{fmt::Debug, fs, io, path::PathBuf, str::FromStr, sync::Arc}; @@ -73,7 +73,7 @@ impl ExportBlocksCmd { ) -> error::Result<()> where B: BlockT, - C: BlockBackend + UsageProvider + 'static, + C: HeaderBackend + BlockBackend + UsageProvider + 'static, <::Number as FromStr>::Err: Debug, { if let Some(path) = database_config.path() { diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 7e50f53d7169a..fd01ba67bab7d 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -28,21 +28,44 @@ pub struct PruningParams { /// This mode specifies when the block's state (ie, storage) /// should be pruned (ie, removed) from the database. /// + /// This setting can only be set on the first creation of the database. Every subsequent run + /// will load the pruning mode from the database and will error if the stored mode doesn't + /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. + /// /// Possible values: - /// 'archive' Keep the state of all blocks. - /// 'archive-canonical' Keep only the state of finalized blocks. - /// number Keep the state of the last number of finalized blocks. - #[arg(alias = "pruning", long, value_name = "PRUNING_MODE", default_value = "256")] - pub state_pruning: DatabasePruningMode, + /// + /// - archive: + /// + /// Keep the state of all blocks. + /// + /// - 'archive-canonical' + /// + /// Keep only the state of finalized blocks. + /// + /// - number + /// + /// Keep the state of the last number of finalized blocks. + /// + /// [default: 256] + #[arg(alias = "pruning", long, value_name = "PRUNING_MODE")] + pub state_pruning: Option, /// Specify the blocks pruning mode. /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. 
/// /// Possible values: - /// 'archive' Keep all blocks. - /// 'archive-canonical' Keep only finalized blocks. - /// number Keep the last `number` of finalized blocks. + /// - 'archive' + /// + /// Keep all blocks. + /// + /// - 'archive-canonical' + /// + /// Keep only finalized blocks. + /// + /// - number + /// + /// Keep the last `number` of finalized blocks. #[arg( alias = "keep-blocks", long, @@ -55,7 +78,7 @@ pub struct PruningParams { impl PruningParams { /// Get the pruning value from the parameters pub fn state_pruning(&self) -> error::Result> { - Ok(Some(self.state_pruning.into())) + Ok(self.state_pruning.map(|v| v.into())) } /// Get the block pruning value from the parameters diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 897a2c4726ae8..c7a6f1f3c0f99 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -152,10 +152,38 @@ impl Runner { // // This is important to be done before we instruct the tokio runtime to shutdown. Otherwise // the tokio runtime will wait the full 60 seconds for all tasks to stop. - drop(task_manager); + let task_registry = task_manager.into_task_registry(); // Give all futures 60 seconds to shutdown, before tokio "leaks" them. 
- self.tokio_runtime.shutdown_timeout(Duration::from_secs(60)); + let shutdown_timeout = Duration::from_secs(60); + self.tokio_runtime.shutdown_timeout(shutdown_timeout); + + let running_tasks = task_registry.running_tasks(); + + if !running_tasks.is_empty() { + log::error!("Detected running(potentially stalled) tasks on shutdown:"); + running_tasks.iter().for_each(|(task, count)| { + let instances_desc = + if *count > 1 { format!("with {} instances ", count) } else { "".to_string() }; + + if task.is_default_group() { + log::error!( + "Task \"{}\" was still running {}after waiting {} seconds to finish.", + task.name, + instances_desc, + shutdown_timeout.as_secs(), + ); + } else { + log::error!( + "Task \"{}\" (Group: {}) was still running {}after waiting {} seconds to finish.", + task.name, + task.group, + instances_desc, + shutdown_timeout.as_secs(), + ); + } + }); + } res.map_err(Into::into) } @@ -388,34 +416,75 @@ mod tests { assert!((count as u128) < (Duration::from_secs(30).as_millis() / 50)); } + fn run_test_in_another_process( + test_name: &str, + test_body: impl FnOnce(), + ) -> Option { + if std::env::var("RUN_FORKED_TEST").is_ok() { + test_body(); + None + } else { + let output = std::process::Command::new(std::env::current_exe().unwrap()) + .arg(test_name) + .env("RUN_FORKED_TEST", "1") + .output() + .unwrap(); + + assert!(output.status.success()); + Some(output) + } + } + /// This test ensures that `run_node_until_exit` aborts waiting for "stuck" tasks after 60 /// seconds, aka doesn't wait until they are finished (which may never happen). 
#[test] fn ensure_run_until_exit_is_not_blocking_indefinitely() { - let runner = create_runner(); + let output = run_test_in_another_process( + "ensure_run_until_exit_is_not_blocking_indefinitely", + || { + sp_tracing::try_init_simple(); + + let runner = create_runner(); + + runner + .run_node_until_exit(move |cfg| async move { + let task_manager = + TaskManager::new(cfg.tokio_handle.clone(), None).unwrap(); + let (sender, receiver) = futures::channel::oneshot::channel(); + + // We need to use `spawn_blocking` here so that we get a dedicated thread + // for our future. This future is more blocking code that will never end. + task_manager.spawn_handle().spawn_blocking("test", None, async move { + let _ = sender.send(()); + loop { + std::thread::sleep(Duration::from_secs(30)); + } + }); + + task_manager.spawn_essential_handle().spawn_blocking( + "test2", + None, + async { + // Let's stop this essential task directly when our other task + // started. It will signal that the task manager should end. + let _ = receiver.await; + }, + ); + + Ok::<_, sc_service::Error>(task_manager) + }) + .unwrap_err(); + }, + ); - runner - .run_node_until_exit(move |cfg| async move { - let task_manager = TaskManager::new(cfg.tokio_handle.clone(), None).unwrap(); - let (sender, receiver) = futures::channel::oneshot::channel(); + let Some(output) = output else { return } ; - // We need to use `spawn_blocking` here so that we get a dedicated thread for our - // future. This future is more blocking code that will never end. - task_manager.spawn_handle().spawn_blocking("test", None, async move { - let _ = sender.send(()); - loop { - std::thread::sleep(Duration::from_secs(30)); - } - }); - - task_manager.spawn_essential_handle().spawn_blocking("test2", None, async { - // Let's stop this essential task directly when our other task started. - // It will signal that the task manager should end. 
- let _ = receiver.await; - }); + let stderr = dbg!(String::from_utf8(output.stderr).unwrap()); - Ok::<_, sc_service::Error>(task_manager) - }) - .unwrap_err(); + assert!( + stderr.contains("Task \"test\" was still running after waiting 60 seconds to finish.") + ); + assert!(!stderr + .contains("Task \"test2\" was still running after waiting 60 seconds to finish.")); } } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 1d5d4b5fe5413..41c00169e5412 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -543,7 +543,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B let stale_forks = match client.expand_forks(¬ification.stale_heads) { Ok(stale_forks) => stale_forks, Err((stale_forks, e)) => { - warn!(target: "babe", "{:?}", e,); + warn!(target: LOG_TARGET, "{:?}", e); stale_forks }, }; @@ -1511,11 +1511,12 @@ where if let Some(next_epoch_descriptor) = next_epoch_digest { old_epoch_changes = Some((*epoch_changes).clone()); - let viable_epoch = epoch_changes + let mut viable_epoch = epoch_changes .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .ok_or_else(|| { ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; + })? + .into_cloned(); let epoch_config = next_config_digest .map(Into::into) @@ -1528,6 +1529,48 @@ where log::Level::Info }; + if viable_epoch.as_ref().end_slot() <= slot { + // some epochs must have been skipped as our current slot + // fits outside the current epoch. 
we will figure out + // which epoch it belongs to and we will re-use the same + // data for that epoch + let mut epoch_data = viable_epoch.as_mut(); + let skipped_epochs = + *slot.saturating_sub(epoch_data.start_slot) / epoch_data.duration; + + // NOTE: notice that we are only updating a local copy of the `Epoch`, this + // makes it so that when we insert the next epoch into `EpochChanges` below + // (after incrementing it), it will use the correct epoch index and start slot. + // we do not update the original epoch that will be re-used because there might + // be other forks (that we haven't imported) where the epoch isn't skipped, and + // to import those forks we want to keep the original epoch data. not updating + // the original epoch works because when we search the tree for which epoch to + // use for a given slot, we will search in-depth with the predicate + // `epoch.start_slot <= slot` which will still match correctly without updating + // `start_slot` to the correct value as below. 
+ let epoch_index = epoch_data.epoch_index.checked_add(skipped_epochs).expect( + "epoch number is u64; it should be strictly smaller than number of slots; \ + slots relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed.", + ); + + let start_slot = skipped_epochs + .checked_mul(epoch_data.duration) + .and_then(|skipped_slots| epoch_data.start_slot.checked_add(skipped_slots)) + .expect( + "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed.", + ); + + warn!( + target: LOG_TARGET, + "👶 Epoch(s) skipped: from {} to {}", epoch_data.epoch_index, epoch_index, + ); + + epoch_data.epoch_index = epoch_index; + epoch_data.start_slot = Slot::from(start_slot); + } + log!( target: LOG_TARGET, log_level, diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 84ac3d7341199..f74864a003e2a 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -28,6 +28,7 @@ use rand_chacha::{ use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; +use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition}; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; @@ -1109,3 +1110,263 @@ async fn obsolete_blocks_aux_data_cleanup() { // Present C4, C5 assert!(aux_data_check(&fork3_hashes, true)); } + +#[tokio::test] +async fn allows_skipping_epochs() { + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let client = peer.client().as_client(); + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + + let mut proposer_factory = 
DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let epoch_changes = data.link.epoch_changes.clone(); + let epoch_length = data.link.config.epoch_length; + + // we create all of the blocks in epoch 0 as well as a block in epoch 1 + let blocks = propose_and_import_blocks( + &client, + &mut proposer_factory, + &mut block_import, + client.chain_info().genesis_hash, + epoch_length as usize + 1, + ) + .await; + + // the first block in epoch 0 (#1) announces both epoch 0 and 1 (this is a + // special genesis epoch) + let epoch0 = epoch_changes + .shared_data() + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Genesis0, + hash: blocks[0], + number: 1, + }) + .unwrap() + .clone(); + + assert_eq!(epoch0.epoch_index, 0); + assert_eq!(epoch0.start_slot, Slot::from(1)); + + let epoch1 = epoch_changes + .shared_data() + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Genesis1, + hash: blocks[0], + number: 1, + }) + .unwrap() + .clone(); + + assert_eq!(epoch1.epoch_index, 1); + assert_eq!(epoch1.start_slot, Slot::from(epoch_length + 1)); + + // the first block in epoch 1 (#7) announces epoch 2. 
we will be skipping + // this epoch and therefore re-using its data for epoch 3 + let epoch2 = epoch_changes + .shared_data() + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Regular, + hash: blocks[epoch_length as usize], + number: epoch_length + 1, + }) + .unwrap() + .clone(); + + assert_eq!(epoch2.epoch_index, 2); + assert_eq!(epoch2.start_slot, Slot::from(epoch_length * 2 + 1)); + + // we now author a block that belongs to epoch 3, thereby skipping epoch 2 + let last_block = client.expect_header(*blocks.last().unwrap()).unwrap(); + let block = propose_and_import_block( + &last_block, + Some((epoch_length * 3 + 1).into()), + &mut proposer_factory, + &mut block_import, + ) + .await; + + // and the first block in epoch 3 (#8) announces epoch 4 + let epoch4 = epoch_changes + .shared_data() + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Regular, + hash: block, + number: epoch_length + 2, + }) + .unwrap() + .clone(); + + assert_eq!(epoch4.epoch_index, 4); + assert_eq!(epoch4.start_slot, Slot::from(epoch_length * 4 + 1)); + + // if we try to get the epoch data for a slot in epoch 3 + let epoch3 = epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*client), + &block, + epoch_length + 2, + (epoch_length * 3 + 2).into(), + |slot| Epoch::genesis(&data.link.config, slot), + ) + .unwrap() + .unwrap(); + + // we get back the data for epoch 2 + assert_eq!(epoch3, epoch2); + + // but if we try to get the epoch data for a slot in epoch 4 + let epoch4_ = epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*client), + &block, + epoch_length + 2, + (epoch_length * 4 + 1).into(), + |slot| Epoch::genesis(&data.link.config, slot), + ) + .unwrap() + .unwrap(); + + // we get epoch 4 as expected + assert_eq!(epoch4, epoch4_); +} + +#[tokio::test] +async fn allows_skipping_epochs_on_some_forks() { + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = 
peer.data.as_ref().expect("babe link set up during initialization"); + + let client = peer.client().as_client(); + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let epoch_changes = data.link.epoch_changes.clone(); + let epoch_length = data.link.config.epoch_length; + + // we create all of the blocks in epoch 0 as well as two blocks in epoch 1 + let blocks = propose_and_import_blocks( + &client, + &mut proposer_factory, + &mut block_import, + client.chain_info().genesis_hash, + epoch_length as usize + 1, + ) + .await; + + // we now author a block that belongs to epoch 2, built on top of the last + // authored block in epoch 1. + let last_block = client.expect_header(*blocks.last().unwrap()).unwrap(); + + let epoch2_block = propose_and_import_block( + &last_block, + Some((epoch_length * 2 + 1).into()), + &mut proposer_factory, + &mut block_import, + ) + .await; + + // if we try to get the epoch data for a slot in epoch 2, we get the data that + // was previously announced when epoch 1 started + let epoch2 = epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*client), + &epoch2_block, + epoch_length + 2, + (epoch_length * 2 + 2).into(), + |slot| Epoch::genesis(&data.link.config, slot), + ) + .unwrap() + .unwrap(); + + // we now author a block that belongs to epoch 3, built on top of the last + // authored block in epoch 1. 
authoring this block means we're skipping epoch 2 + // entirely on this fork + let epoch3_block = propose_and_import_block( + &last_block, + Some((epoch_length * 3 + 1).into()), + &mut proposer_factory, + &mut block_import, + ) + .await; + + // if we try to get the epoch data for a slot in epoch 3 + let epoch3_ = epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*client), + &epoch3_block, + epoch_length + 2, + (epoch_length * 3 + 2).into(), + |slot| Epoch::genesis(&data.link.config, slot), + ) + .unwrap() + .unwrap(); + + // we get back the data for epoch 2 + assert_eq!(epoch3_, epoch2); + + // if we try to get the epoch data for a slot in epoch 4 in the fork + // where we skipped epoch 2, we should get the epoch data for epoch 4 + // that was announced at the beginning of epoch 3 + let epoch_data = epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*client), + &epoch3_block, + epoch_length + 2, + (epoch_length * 4 + 1).into(), + |slot| Epoch::genesis(&data.link.config, slot), + ) + .unwrap() + .unwrap(); + + assert!(epoch_data != epoch3_); + + // if we try to get the epoch data for a slot in epoch 4 in the fork + // where we didn't skip epoch 2, we should get back the data for epoch 3, + // that was announced when epoch 2 started in that fork + let epoch_data = epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*client), + &epoch2_block, + epoch_length + 2, + (epoch_length * 4 + 1).into(), + |slot| Epoch::genesis(&data.link.config, slot), + ) + .unwrap() + .unwrap(); + + assert!(epoch_data != epoch3_); + + let epoch3 = epoch_changes + .shared_data() + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Regular, + hash: epoch2_block, + number: epoch_length + 2, + }) + .unwrap() + .clone(); + + assert_eq!(epoch_data, epoch3); +} diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 
b63bc192b2e77..a96b0a47e57c7 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -69,7 +69,7 @@ impl BasicQueue { spawner: &impl sp_core::traits::SpawnEssentialNamed, prometheus_registry: Option<&Registry>, ) -> Self { - let (result_sender, result_port) = buffered_link::buffered_link(); + let (result_sender, result_port) = buffered_link::buffered_link(100_000); let metrics = prometheus_registry.and_then(|r| { Metrics::register(r) @@ -276,10 +276,10 @@ impl BlockImportWorker { use worker_messages::*; let (justification_sender, mut justification_port) = - tracing_unbounded("mpsc_import_queue_worker_justification"); + tracing_unbounded("mpsc_import_queue_worker_justification", 100_000); let (block_import_sender, block_import_port) = - tracing_unbounded("mpsc_import_queue_worker_blocks"); + tracing_unbounded("mpsc_import_queue_worker_blocks", 100_000); let mut worker = BlockImportWorker { result_sender, justification_import, metrics }; @@ -595,7 +595,7 @@ mod tests { #[test] fn prioritizes_finality_work_over_block_import() { - let (result_sender, mut result_port) = buffered_link::buffered_link(); + let (result_sender, mut result_port) = buffered_link::buffered_link(100_000); let (worker, mut finality_sender, mut block_import_sender) = BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None); diff --git a/client/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs index e6d3b212fdbac..71adcf2dc2ea9 100644 --- a/client/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -28,7 +28,7 @@ //! # use sp_test_primitives::Block; //! # struct DummyLink; impl Link for DummyLink {} //! # let mut my_link = DummyLink; -//! let (mut tx, mut rx) = buffered_link::(); +//! let (mut tx, mut rx) = buffered_link::(100_000); //! 
tx.blocks_processed(0, 0, vec![]); //! //! // Calls `my_link.blocks_processed(0, 0, vec![])` when polled. @@ -51,9 +51,11 @@ use super::BlockImportResult; /// Wraps around an unbounded channel from the `futures` crate. The sender implements `Link` and /// can be used to buffer commands, and the receiver can be used to poll said commands and transfer -/// them to another link. -pub fn buffered_link() -> (BufferedLinkSender, BufferedLinkReceiver) { - let (tx, rx) = tracing_unbounded("mpsc_buffered_link"); +/// them to another link. `queue_size_warning` sets the warning threshold of the channel queue size. +pub fn buffered_link( + queue_size_warning: i64, +) -> (BufferedLinkSender, BufferedLinkReceiver) { + let (tx, rx) = tracing_unbounded("mpsc_buffered_link", queue_size_warning); let tx = BufferedLinkSender { tx }; let rx = BufferedLinkReceiver { rx: rx.fuse() }; (tx, rx) @@ -175,7 +177,7 @@ mod tests { #[test] fn is_closed() { - let (tx, rx) = super::buffered_link::(); + let (tx, rx) = super::buffered_link::(1); assert!(!tx.is_closed()); drop(rx); assert!(tx.is_closed()); diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index dfdad666ba8f3..70ff7ed176869 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -104,7 +104,7 @@ where } fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult { - let stream = self.justification_stream.subscribe().map( + let stream = self.justification_stream.subscribe(100_000).map( |x: sc_finality_grandpa::GrandpaJustification| { JustificationNotification::from(x) }, diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 408cbda745e56..cbcafc727d436 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -1364,7 +1364,7 @@ impl GossipValidator { None => None, }; - let (tx, rx) = 
tracing_unbounded("mpsc_grandpa_gossip_validator"); + let (tx, rx) = tracing_unbounded("mpsc_grandpa_gossip_validator", 100_000); let val = GossipValidator { inner: parking_lot::RwLock::new(Inner::new(config)), set_state, diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index 7e50abb96e441..c00fed1296512 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -70,6 +70,7 @@ impl NeighborPacketWorker { pub(super) fn new(rebroadcast_period: Duration) -> (Self, NeighborPacketSender) { let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( "mpsc_grandpa_neighbor_packet_worker", + 100_000, ); let delay = Delay::new(rebroadcast_period); diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index eab7bb2df50cf..839b2d52be651 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -135,7 +135,7 @@ impl NetworkEventStream for TestNetwork { &self, _name: &'static str, ) -> Pin + Send>> { - let (tx, rx) = tracing_unbounded("test"); + let (tx, rx) = tracing_unbounded("test", 100_000); let _ = self.sender.unbounded_send(Event::EventStream(tx)); Box::pin(rx) } @@ -253,7 +253,7 @@ fn voter_set_state() -> SharedVoterSetState { // needs to run in a tokio runtime. 
pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { - let (tx, rx) = tracing_unbounded("test"); + let (tx, rx) = tracing_unbounded("test", 100_000); let net = TestNetwork { sender: tx }; #[derive(Clone)] diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index efc46d8f93a6d..1597e60bd6061 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -566,7 +566,8 @@ where } })?; - let (voter_commands_tx, voter_commands_rx) = tracing_unbounded("mpsc_grandpa_voter_command"); + let (voter_commands_tx, voter_commands_rx) = + tracing_unbounded("mpsc_grandpa_voter_command", 100_000); let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 1efb71e5903ec..96101a8eda0ab 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -437,7 +437,7 @@ mod tests { aux_schema::load_persistent(&*backend, client.info().genesis_hash, 0, || Ok(voters)) .unwrap(); - let (_tx, voter_command_rx) = tracing_unbounded(""); + let (_tx, voter_command_rx) = tracing_unbounded("test_mpsc_voter_command", 100_000); let observer = ObserverWork::new( client, diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index 95b658e92298a..776411f8fb493 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -579,7 +579,7 @@ mod tests { impl TestChainState { fn new() -> (Self, ImportNotifications) { - let (tx, rx) = tracing_unbounded("test"); + let (tx, rx) = tracing_unbounded("test", 100_000); let state = TestChainState { sender: tx, known_blocks: Arc::new(Mutex::new(HashMap::new())) }; @@ -680,7 +680,7 @@ mod tests { // enact all dependencies before importing the message enact_dependencies(&chain_state); - let (global_tx, global_rx) = 
tracing_unbounded("test"); + let (global_tx, global_rx) = tracing_unbounded("test", 100_000); let until_imported = UntilGlobalMessageBlocksImported::new( import_notifications, @@ -708,7 +708,7 @@ mod tests { let (chain_state, import_notifications) = TestChainState::new(); let block_status = chain_state.block_status(); - let (global_tx, global_rx) = tracing_unbounded("test"); + let (global_tx, global_rx) = tracing_unbounded("test", 100_000); let until_imported = UntilGlobalMessageBlocksImported::new( import_notifications, @@ -896,7 +896,7 @@ mod tests { let (chain_state, import_notifications) = TestChainState::new(); let block_status = chain_state.block_status(); - let (global_tx, global_rx) = tracing_unbounded("test"); + let (global_tx, global_rx) = tracing_unbounded("test", 100_000); let block_sync_requester = TestBlockSyncRequester::default(); diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 66b831dd75789..4356cc5df1b13 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "4.1" async-trait = "0.1" asynchronous-codec = "0.6" +backtrace = "0.3.67" bytes = "1" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } either = "1.5.3" diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 5e8219c550d19..802cf75fc4709 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -70,7 +70,7 @@ pub struct StateDownloadProgress { } /// Syncing status and statistics. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct SyncStatus { /// Current global sync state. 
pub state: SyncState>, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index f04699d72d9b5..f943a03f50b38 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -208,7 +208,7 @@ where ¶ms.network_config.transport, )?; - let (to_worker, from_service) = tracing_unbounded("mpsc_network_worker"); + let (to_worker, from_service) = tracing_unbounded("mpsc_network_worker", 100_000); if let Some(path) = ¶ms.network_config.net_config_path { fs::create_dir_all(path)?; @@ -1009,7 +1009,7 @@ where H: ExHashT, { fn event_stream(&self, name: &'static str) -> Pin + Send>> { - let (tx, rx) = out_events::channel(name); + let (tx, rx) = out_events::channel(name, 100_000); let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); Box::pin(rx) } diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 4144d7f19551e..5e0c8ac6a1a40 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -31,7 +31,9 @@ //! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the //! collection. +use backtrace::Backtrace; use futures::{channel::mpsc, prelude::*, ready, stream::FusedStream}; +use log::error; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_network_common::protocol::event::Event; @@ -39,18 +41,31 @@ use std::{ cell::RefCell, fmt, pin::Pin, - sync::Arc, + sync::{ + atomic::{AtomicI64, Ordering}, + Arc, + }, task::{Context, Poll}, }; /// Creates a new channel that can be associated to a [`OutChannels`]. /// -/// The name is used in Prometheus reports. -pub fn channel(name: &'static str) -> (Sender, Receiver) { +/// The name is used in Prometheus reports, the queue size threshold is used +/// to warn if there are too many unprocessed events in the channel. 
+pub fn channel(name: &'static str, queue_size_warning: i64) -> (Sender, Receiver) { let (tx, rx) = mpsc::unbounded(); let metrics = Arc::new(Mutex::new(None)); - let tx = Sender { inner: tx, name, metrics: metrics.clone() }; - let rx = Receiver { inner: rx, name, metrics }; + let queue_size = Arc::new(AtomicI64::new(0)); + let tx = Sender { + inner: tx, + name, + queue_size: queue_size.clone(), + queue_size_warning, + warning_fired: false, + creation_backtrace: Backtrace::new_unresolved(), + metrics: metrics.clone(), + }; + let rx = Receiver { inner: rx, name, queue_size, metrics }; (tx, rx) } @@ -63,8 +78,21 @@ pub fn channel(name: &'static str) -> (Sender, Receiver) { /// sync on drop. If someone adds a `#[derive(Clone)]` below, it is **wrong**. pub struct Sender { inner: mpsc::UnboundedSender, + /// Name to identify the channel (e.g., in Prometheus and logs). name: &'static str, - /// Clone of [`Receiver::metrics`]. + /// Number of events in the queue. Clone of [`Receiver::in_transit`]. + // To not bother with ordering and possible underflow errors of the unsigned counter + // we just use `i64` and `Ordering::Relaxed`, and perceive `queue_size` as approximate. + // It can turn < 0 though. + queue_size: Arc, + /// Threshold queue size to generate an error message in the logs. + queue_size_warning: i64, + /// We generate the error message only once to not spam the logs. + warning_fired: bool, + /// Backtrace of a place where the channel was created. + creation_backtrace: Backtrace, + /// Clone of [`Receiver::metrics`]. Will be initialized when [`Sender`] is added to + /// [`OutChannels`] with `OutChannels::push()`. metrics: Arc>>>>, } @@ -87,6 +115,7 @@ impl Drop for Sender { pub struct Receiver { inner: mpsc::UnboundedReceiver, name: &'static str, + queue_size: Arc, /// Initially contains `None`, and will be set to a value once the corresponding [`Sender`] /// is assigned to an instance of [`OutChannels`]. 
metrics: Arc>>>>, @@ -97,6 +126,7 @@ impl Stream for Receiver { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { if let Some(ev) = ready!(Pin::new(&mut self.inner).poll_next(cx)) { + let _ = self.queue_size.fetch_sub(1, Ordering::Relaxed); let metrics = self.metrics.lock().clone(); match metrics.as_ref().map(|m| m.as_ref()) { Some(Some(metrics)) => metrics.event_out(&ev, self.name), @@ -160,12 +190,23 @@ impl OutChannels { /// Sends an event. pub fn send(&mut self, event: Event) { - self.event_streams - .retain(|sender| sender.inner.unbounded_send(event.clone()).is_ok()); + self.event_streams.retain_mut(|sender| { + let queue_size = sender.queue_size.fetch_add(1, Ordering::Relaxed); + if queue_size == sender.queue_size_warning && !sender.warning_fired { + sender.warning_fired = true; + sender.creation_backtrace.resolve(); + error!( + "The number of unprocessed events in channel `{}` reached {}.\n\ + The channel was created at:\n{:?}", + sender.name, sender.queue_size_warning, sender.creation_backtrace, + ); + } + sender.inner.unbounded_send(event.clone()).is_ok() + }); if let Some(metrics) = &*self.metrics { for ev in &self.event_streams { - metrics.event_in(&event, 1, ev.name); + metrics.event_in(&event, ev.name); } } } @@ -232,45 +273,35 @@ impl Metrics { }) } - fn event_in(&self, event: &Event, num: u64, name: &str) { + fn event_in(&self, event: &Event, name: &str) { match event { Event::Dht(_) => { - self.events_total.with_label_values(&["dht", "sent", name]).inc_by(num); + self.events_total.with_label_values(&["dht", "sent", name]).inc(); }, Event::SyncConnected { .. } => { - self.events_total - .with_label_values(&["sync-connected", "sent", name]) - .inc_by(num); + self.events_total.with_label_values(&["sync-connected", "sent", name]).inc(); }, Event::SyncDisconnected { .. 
} => { - self.events_total - .with_label_values(&["sync-disconnected", "sent", name]) - .inc_by(num); + self.events_total.with_label_values(&["sync-disconnected", "sent", name]).inc(); }, Event::NotificationStreamOpened { protocol, .. } => { format_label("notif-open-", protocol, |protocol_label| { - self.events_total - .with_label_values(&[protocol_label, "sent", name]) - .inc_by(num); + self.events_total.with_label_values(&[protocol_label, "sent", name]).inc(); }); }, Event::NotificationStreamClosed { protocol, .. } => { format_label("notif-closed-", protocol, |protocol_label| { - self.events_total - .with_label_values(&[protocol_label, "sent", name]) - .inc_by(num); + self.events_total.with_label_values(&[protocol_label, "sent", name]).inc(); }); }, Event::NotificationsReceived { messages, .. } => for (protocol, message) in messages { format_label("notif-", protocol, |protocol_label| { - self.events_total - .with_label_values(&[protocol_label, "sent", name]) - .inc_by(num); + self.events_total.with_label_values(&[protocol_label, "sent", name]).inc(); }); - self.notifications_sizes.with_label_values(&[protocol, "sent", name]).inc_by( - num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::MAX)), - ); + self.notifications_sizes + .with_label_values(&[protocol, "sent", name]) + .inc_by(u64::try_from(message.len()).unwrap_or(u64::MAX)); }, } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 7dd818d4c12cb..312fc6f5b7947 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -86,7 +86,6 @@ use sp_consensus::{ BlockOrigin, BlockStatus, }; use sp_runtime::{ - generic::BlockId, traits::{ Block as BlockT, CheckedSub, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, @@ -1436,7 +1435,7 @@ where state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { - let (tx, service_rx) = 
tracing_unbounded("mpsc_chain_sync"); + let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); let block_announce_config = Self::get_block_announce_proto_config( protocol_id, fork_id, @@ -1865,8 +1864,7 @@ where self.best_queued_number = info.best_number; if self.mode == SyncMode::Full && - self.client.block_status(&BlockId::hash(info.best_hash))? != - BlockStatus::InChainWithState + self.client.block_status(info.best_hash)? != BlockStatus::InChainWithState { self.import_existing = true; // Latest state is missing, start with the last finalized state or genesis instead. @@ -1898,7 +1896,7 @@ where if self.queue_blocks.contains(hash) { return Ok(BlockStatus::Queued) } - self.client.block_status(&BlockId::Hash(*hash)) + self.client.block_status(*hash) } /// Is the block corresponding to the given hash known? @@ -2455,9 +2453,7 @@ where if queue.contains(hash) { BlockStatus::Queued } else { - client - .block_status(&BlockId::Hash(*hash)) - .unwrap_or(BlockStatus::Unknown) + client.block_status(*hash).unwrap_or(BlockStatus::Unknown) } }, ) { @@ -3202,6 +3198,7 @@ mod test { }; use sp_blockchain::HeaderBackend; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; + use sp_runtime::generic::BlockId; use substrate_test_runtime_client::{ runtime::{Block, Hash, Header}, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, diff --git a/client/network/sync/src/service/network.rs b/client/network/sync/src/service/network.rs index c44398b0f1a9e..b81a65ae731cf 100644 --- a/client/network/sync/src/service/network.rs +++ b/client/network/sync/src/service/network.rs @@ -99,7 +99,7 @@ impl NetworkServiceHandle { impl NetworkServiceProvider { /// Create new `NetworkServiceProvider` pub fn new() -> (Self, NetworkServiceHandle) { - let (tx, rx) = tracing_unbounded("mpsc_network_service_provider"); + let (tx, rx) = tracing_unbounded("mpsc_network_service_provider", 100_000); (Self { rx }, NetworkServiceHandle::new(tx)) } 
diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml index bce40bfd7fac8..df13cdcb820d2 100644 --- a/client/network/transactions/Cargo.toml +++ b/client/network/transactions/Cargo.toml @@ -22,5 +22,6 @@ pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index 4cc76507c6f16..a5adb274d29de 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -28,7 +28,7 @@ use crate::config::*; use codec::{Decode, Encode}; -use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; +use futures::{prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; @@ -40,6 +40,7 @@ use sc_network_common::{ utils::{interval, LruHashSet}, ExHashT, }; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; use std::{ collections::{hash_map::Entry, HashMap}, @@ -168,7 +169,7 @@ impl TransactionsHandlerPrototype { metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { let event_stream = service.event_stream("transactions-handler"); - let (to_handler, from_controller) = mpsc::unbounded(); + let (to_handler, from_controller) = tracing_unbounded("mpsc_transactions_handler", 100_000); let handler = TransactionsHandler { 
protocol_name: self.protocol_name, @@ -197,7 +198,7 @@ impl TransactionsHandlerPrototype { /// Controls the behaviour of a [`TransactionsHandler`] it is connected to. pub struct TransactionsHandlerController { - to_handler: mpsc::UnboundedSender>, + to_handler: TracingUnboundedSender>, } impl TransactionsHandlerController { @@ -246,7 +247,7 @@ pub struct TransactionsHandler< // All connected peers peers: HashMap>, transaction_pool: Arc>, - from_controller: mpsc::UnboundedReceiver>, + from_controller: TracingUnboundedReceiver>, /// Prometheus metrics. metrics: Option, } diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 4c97e5a47058d..a47adb3e8026e 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -66,8 +66,8 @@ impl SharedClient { /// Creates a pair of [`HttpApi`] and [`HttpWorker`]. pub fn http(shared_client: SharedClient) -> (HttpApi, HttpWorker) { - let (to_worker, from_api) = tracing_unbounded("mpsc_ocw_to_worker"); - let (to_api, from_worker) = tracing_unbounded("mpsc_ocw_to_api"); + let (to_worker, from_api) = tracing_unbounded("mpsc_ocw_to_worker", 100_000); + let (to_api, from_worker) = tracing_unbounded("mpsc_ocw_to_api", 100_000); let api = HttpApi { to_worker, diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index ec09835c4898e..9b1dc6a2d0276 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -275,7 +275,7 @@ pub struct Peerset { impl Peerset { /// Builds a new peerset from the given configuration. 
pub fn from_config(config: PeersetConfig) -> (Self, PeersetHandle) { - let (tx, rx) = tracing_unbounded("mpsc_peerset_messages"); + let (tx, rx) = tracing_unbounded("mpsc_peerset_messages", 10_000); let handle = PeersetHandle { tx: tx.clone() }; diff --git a/client/rpc-spec-v2/src/chain_head/chain_head.rs b/client/rpc-spec-v2/src/chain_head/chain_head.rs index c0ba9e165dd04..c63d373e04f16 100644 --- a/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -573,7 +573,7 @@ where return } - let event = match client.block(&BlockId::Hash(hash)) { + let event = match client.block(hash) { Ok(Some(signed_block)) => { let extrinsics = signed_block.block.extrinsics(); let result = format!("0x{:?}", HexDisplay::from(&extrinsics.encode())); diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index ee7870e0629e0..6ff544b0deacd 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -29,10 +29,7 @@ use futures::{ use jsonrpsee::SubscriptionSink; use sc_client_api::{BlockBackend, BlockchainEvents}; use sp_blockchain::HeaderBackend; -use sp_runtime::{ - generic::{BlockId, SignedBlock}, - traits::Block as BlockT, -}; +use sp_runtime::{generic::SignedBlock, traits::Block as BlockT}; /// Blockchain API backend for full nodes. Reads all the data from local database. 
pub struct FullChain { @@ -66,7 +63,7 @@ where } fn block(&self, hash: Option) -> Result>, Error> { - self.client.block(&BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err) + self.client.block(self.unwrap_or_best(hash)).map_err(client_err) } fn subscribe_all_heads(&self, sink: SubscriptionSink) { diff --git a/client/rpc/src/dev/mod.rs b/client/rpc/src/dev/mod.rs index ec9e61678fd71..e48a8ee4e7d5c 100644 --- a/client/rpc/src/dev/mod.rs +++ b/client/rpc/src/dev/mod.rs @@ -69,10 +69,7 @@ where self.deny_unsafe.check_if_safe()?; let block = { - let block = self - .client - .block(&BlockId::Hash(hash)) - .map_err(|e| Error::BlockQueryError(Box::new(e)))?; + let block = self.client.block(hash).map_err(|e| Error::BlockQueryError(Box::new(e)))?; if let Some(block) = block { let (mut header, body) = block.block.deconstruct(); // Remove the `Seal` to ensure we have the number of digests as expected by the diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 00ab9c46861e2..4da49cdd1a0c5 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -52,7 +52,7 @@ impl Default for Status { fn api>>(sync: T) -> RpcModule> { let status = sync.into().unwrap_or_default(); let should_have_peers = !status.is_dev; - let (tx, rx) = tracing_unbounded("rpc_system_tests"); + let (tx, rx) = tracing_unbounded("rpc_system_tests", 10_000); thread::spawn(move || { futures::executor::block_on(rx.for_each(move |request| { match request { diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 0a26c00485e2f..1f94f96fae89e 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -962,7 +962,7 @@ where ); spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(chain_sync_service))); - let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc", 10_000); let future = build_network_future( 
config.role.clone(), diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs index 41a6c73c5f473..c3380e5476ab6 100644 --- a/client/service/src/chain_ops/check_block.rs +++ b/client/service/src/chain_ops/check_block.rs @@ -18,34 +18,37 @@ use crate::error::Error; use codec::Encode; -use futures::{future, prelude::*}; use sc_client_api::{BlockBackend, HeaderBackend}; use sc_consensus::import_queue::ImportQueue; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use crate::chain_ops::import_blocks; -use std::{pin::Pin, sync::Arc}; +use std::sync::Arc; /// Re-validate known block. -pub fn check_block( +pub async fn check_block( client: Arc, import_queue: IQ, block_id: BlockId, -) -> Pin> + Send>> +) -> Result<(), Error> where C: BlockBackend + HeaderBackend + Send + Sync + 'static, B: BlockT + for<'de> serde::Deserialize<'de>, IQ: ImportQueue + 'static, { - match client.block(&block_id) { - Ok(Some(block)) => { + let maybe_block = client + .block_hash_from_id(&block_id)? + .map(|hash| client.block(hash)) + .transpose()? 
+ .flatten(); + match maybe_block { + Some(block) => { let mut buf = Vec::new(); 1u64.encode_to(&mut buf); block.encode_to(&mut buf); let reader = std::io::Cursor::new(buf); - import_blocks(client, import_queue, reader, true, true) + import_blocks(client, import_queue, reader, true, true).await }, - Ok(None) => Box::pin(future::err("Unknown block".into())), - Err(e) => Box::pin(future::err(format!("Error reading block: {}", e).into())), + None => Err("Unknown block")?, } } diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs index d442a11f2c39b..9638722ef6b21 100644 --- a/client/service/src/chain_ops/export_blocks.rs +++ b/client/service/src/chain_ops/export_blocks.rs @@ -25,7 +25,7 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor, One, SaturatedConversion, Zero}, }; -use sc_client_api::{BlockBackend, UsageProvider}; +use sc_client_api::{BlockBackend, HeaderBackend, UsageProvider}; use std::{io::Write, pin::Pin, sync::Arc, task::Poll}; /// Performs the blocks export. @@ -37,7 +37,7 @@ pub fn export_blocks( binary: bool, ) -> Pin>>> where - C: BlockBackend + UsageProvider + 'static, + C: HeaderBackend + BlockBackend + UsageProvider + 'static, B: BlockT, { let mut block = from; @@ -75,7 +75,12 @@ where wrote_header = true; } - match client.block(&BlockId::number(block))? { + match client + .block_hash_from_id(&BlockId::number(block))? + .map(|hash| client.block(hash)) + .transpose()? + .flatten() + { Some(block) => if binary { output.write_all(&block.encode())?; @@ -83,7 +88,6 @@ where serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; }, - // Reached end of the chain. 
None => return Poll::Ready(Ok(())), } if (block % 10000u32.into()).is_zero() { diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 493fd320b7b23..18012fc1931fe 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -784,7 +784,8 @@ where let parent_hash = import_block.header.parent_hash(); let at = BlockId::Hash(*parent_hash); let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); - let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { + let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action) + { (BlockStatus::KnownBad, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), ( @@ -1025,17 +1026,18 @@ where } /// Get block status. - pub fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { + pub fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result { // this can probably be implemented more efficiently - if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued) - } + if self + .importing_block + .read() + .as_ref() + .map_or(false, |importing| &hash == importing) + { + return Ok(BlockStatus::Queued) } - let hash_and_number = match *id { - BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), - BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), - }; + + let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n)); match hash_and_number { Some((hash, number)) => if self.backend.have_state_at(hash, number) { @@ -1779,7 +1781,7 @@ where // Own status must be checked first. 
If the block and ancestry is pruned // this function must return `AlreadyInChain` rather than `MissingState` match self - .block_status(&BlockId::Hash(hash)) + .block_status(hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? { BlockStatus::InChainWithState | BlockStatus::Queued => @@ -1792,7 +1794,7 @@ where } match self - .block_status(&BlockId::Hash(parent_hash)) + .block_status(parent_hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? { BlockStatus::InChainWithState | BlockStatus::Queued => {}, @@ -1913,13 +1915,13 @@ where { /// Get block import event stream. fn import_notification_stream(&self) -> ImportNotifications { - let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream"); + let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream", 100_000); self.import_notification_sinks.lock().push(sink); stream } fn finality_notification_stream(&self) -> FinalityNotifications { - let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream"); + let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream", 100_000); self.finality_notification_sinks.lock().push(sink); stream } @@ -1947,20 +1949,16 @@ where self.body(hash) } - fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { - Ok(match self.backend.blockchain().block_hash_from_id(id)? { - Some(hash) => - match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) { - (Some(header), Some(extrinsics), justifications) => - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), - _ => None, - }, - None => None, + fn block(&self, hash: Block::Hash) -> sp_blockchain::Result>> { + Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) 
{ + (Some(header), Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), + _ => None, }) } - fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { - Client::block_status(self, id) + fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result { + Client::block_status(self, hash) } fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { @@ -2055,9 +2053,9 @@ where { fn block_status( &self, - id: &BlockId, + hash: B::Hash, ) -> Result> { - Client::block_status(self, id).map_err(|e| Box::new(e) as Box<_>) + Client::block_status(self, hash).map_err(|e| Box::new(e) as Box<_>) } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 8b3a29ba4032a..1529b822ade32 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -83,7 +83,7 @@ pub use sc_transaction_pool::Options as TransactionPoolOptions; pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; -pub use task_manager::{SpawnTaskHandle, TaskManager, DEFAULT_GROUP_NAME}; +pub use task_manager::{SpawnTaskHandle, Task, TaskManager, TaskRegistry, DEFAULT_GROUP_NAME}; const DEFAULT_PROTOCOL_ID: &str = "sup"; diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index 49189dc21ce8d..d792122576444 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -24,12 +24,19 @@ use futures::{ future::{pending, select, try_join_all, BoxFuture, Either}, Future, FutureExt, StreamExt, }; +use parking_lot::Mutex; use prometheus_endpoint::{ exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use std::{panic, pin::Pin, result::Result}; +use std::{ + 
collections::{hash_map::Entry, HashMap}, + panic, + pin::Pin, + result::Result, + sync::Arc, +}; use tokio::runtime::Handle; use tracing_futures::Instrument; @@ -72,6 +79,7 @@ pub struct SpawnTaskHandle { on_exit: exit_future::Exit, tokio_handle: Handle, metrics: Option, + task_registry: TaskRegistry, } impl SpawnTaskHandle { @@ -113,6 +121,7 @@ impl SpawnTaskHandle { ) { let on_exit = self.on_exit.clone(); let metrics = self.metrics.clone(); + let registry = self.task_registry.clone(); let group = match group.into() { GroupName::Specific(var) => var, @@ -129,6 +138,10 @@ impl SpawnTaskHandle { } let future = async move { + // Register the task and keep the "token" alive until the task is ended. Then this + // "token" will unregister this task. + let _registry_token = registry.register_task(name, group); + if let Some(metrics) = metrics { // Add some wrappers around `task`. let task = { @@ -298,6 +311,8 @@ pub struct TaskManager { /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. children: Vec, + /// The registry of all running tasks. + task_registry: TaskRegistry, } impl TaskManager { @@ -310,7 +325,8 @@ impl TaskManager { let (signal, on_exit) = exit_future::signal(); // A side-channel for essential tasks to communicate shutdown. 
- let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks"); + let (essential_failed_tx, essential_failed_rx) = + tracing_unbounded("mpsc_essential_tasks", 100); let metrics = prometheus_registry.map(Metrics::register).transpose()?; @@ -323,6 +339,7 @@ impl TaskManager { essential_failed_rx, keep_alive: Box::new(()), children: Vec::new(), + task_registry: Default::default(), }) } @@ -332,6 +349,7 @@ impl TaskManager { on_exit: self.on_exit.clone(), tokio_handle: self.tokio_handle.clone(), metrics: self.metrics.clone(), + task_registry: self.task_registry.clone(), } } @@ -384,6 +402,14 @@ impl TaskManager { pub fn add_child(&mut self, child: TaskManager) { self.children.push(child); } + + /// Consume `self` and return the [`TaskRegistry`]. + /// + /// This [`TaskRegistry`] can be used to check for still running tasks after this task manager + /// was dropped. + pub fn into_task_registry(self) -> TaskRegistry { + self.task_registry + } } #[derive(Clone)] @@ -433,3 +459,74 @@ impl Metrics { }) } } + +/// Ensures that a [`Task`] is unregistered when this object is dropped. +struct UnregisterOnDrop { + task: Task, + registry: TaskRegistry, +} + +impl Drop for UnregisterOnDrop { + fn drop(&mut self) { + let mut tasks = self.registry.tasks.lock(); + + if let Entry::Occupied(mut entry) = (*tasks).entry(self.task.clone()) { + *entry.get_mut() -= 1; + + if *entry.get() == 0 { + entry.remove(); + } + } + } +} + +/// Represents a running async task in the [`TaskManager`]. +/// +/// As a task is identified by a name and a group, it is totally valid that there exists multiple +/// tasks with the same name and group. +#[derive(Clone, Hash, Eq, PartialEq)] +pub struct Task { + /// The name of the task. + pub name: &'static str, + /// The group this task is associated to. + pub group: &'static str, +} + +impl Task { + /// Returns if the `group` is the [`DEFAULT_GROUP_NAME`]. 
+ pub fn is_default_group(&self) -> bool { + self.group == DEFAULT_GROUP_NAME + } +} + +/// Keeps track of all running [`Task`]s in [`TaskManager`]. +#[derive(Clone, Default)] +pub struct TaskRegistry { + tasks: Arc>>, +} + +impl TaskRegistry { + /// Register a task with the given `name` and `group`. + /// + /// Returns [`UnregisterOnDrop`] that ensures that the task is unregistered when this value is + /// dropped. + fn register_task(&self, name: &'static str, group: &'static str) -> UnregisterOnDrop { + let task = Task { name, group }; + + { + let mut tasks = self.tasks.lock(); + + *(*tasks).entry(task.clone()).or_default() += 1; + } + + UnregisterOnDrop { task, registry: self.clone() } + } + + /// Returns the running tasks. + /// + /// As a task is only identified by its `name` and `group`, there can be duplicate tasks. The + /// number per task represents the concurrently running tasks with the same identifier. + pub fn running_tasks(&self) -> HashMap { + (*self.tasks.lock()).clone() + } +} diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 661cf83fc49bf..97c22a1cb509e 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1137,7 +1137,7 @@ fn get_block_by_bad_block_hash_returns_none() { let client = substrate_test_runtime_client::new(); let hash = H256::from_low_u64_be(5); - assert!(client.block(&BlockId::Hash(hash)).unwrap().is_none()); + assert!(client.block(hash).unwrap().is_none()); } #[test] @@ -1497,10 +1497,7 @@ fn returns_status_for_pruned_blocks() { block_on(client.check_block(check_block_a1.clone())).unwrap(), ImportResult::imported(false), ); - assert_eq!( - client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), - BlockStatus::Unknown, - ); + assert_eq!(client.block_status(check_block_a1.hash).unwrap(), BlockStatus::Unknown); block_on(client.import_as_final(BlockOrigin::Own, a1.clone())).unwrap(); @@ -1508,10 +1505,7 @@ fn 
returns_status_for_pruned_blocks() { block_on(client.check_block(check_block_a1.clone())).unwrap(), ImportResult::AlreadyInChain, ); - assert_eq!( - client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), - BlockStatus::InChainWithState, - ); + assert_eq!(client.block_status(check_block_a1.hash).unwrap(), BlockStatus::InChainWithState); let a2 = client .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) @@ -1534,18 +1528,12 @@ fn returns_status_for_pruned_blocks() { block_on(client.check_block(check_block_a1.clone())).unwrap(), ImportResult::AlreadyInChain, ); - assert_eq!( - client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), - BlockStatus::InChainPruned, - ); + assert_eq!(client.block_status(check_block_a1.hash).unwrap(), BlockStatus::InChainPruned); assert_eq!( block_on(client.check_block(check_block_a2.clone())).unwrap(), ImportResult::AlreadyInChain, ); - assert_eq!( - client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), - BlockStatus::InChainWithState, - ); + assert_eq!(client.block_status(check_block_a2.hash).unwrap(), BlockStatus::InChainWithState); let a3 = client .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) @@ -1569,26 +1557,17 @@ fn returns_status_for_pruned_blocks() { block_on(client.check_block(check_block_a1.clone())).unwrap(), ImportResult::AlreadyInChain, ); - assert_eq!( - client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), - BlockStatus::InChainPruned, - ); + assert_eq!(client.block_status(check_block_a1.hash).unwrap(), BlockStatus::InChainPruned); assert_eq!( block_on(client.check_block(check_block_a2.clone())).unwrap(), ImportResult::AlreadyInChain, ); - assert_eq!( - client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), - BlockStatus::InChainPruned, - ); + assert_eq!(client.block_status(check_block_a2.hash).unwrap(), BlockStatus::InChainPruned); assert_eq!( block_on(client.check_block(check_block_a3.clone())).unwrap(), 
ImportResult::AlreadyInChain, ); - assert_eq!( - client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), - BlockStatus::InChainWithState, - ); + assert_eq!(client.block_status(check_block_a3.hash).unwrap(), BlockStatus::InChainWithState); let mut check_block_b1 = BlockCheckParams { hash: b1.hash(), diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 4d2140d215616..4d09a28370e6d 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -20,6 +20,7 @@ libp2p = { version = "0.50.0", features = ["dns", "tcp", "tokio", "wasm-ext", "w log = "0.4.17" parking_lot = "0.12.1" pin-project = "1.0.12" +sc-utils = { version = "4.0.0-dev", path = "../utils" } rand = "0.8.5" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 503a326f76c2b..aa6b841b79164 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -40,6 +40,7 @@ use futures::{channel::mpsc, prelude::*}; use libp2p::Multiaddr; use log::{error, warn}; use parking_lot::Mutex; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use serde::Serialize; use std::{ collections::{ @@ -147,8 +148,8 @@ pub struct SysInfo { pub struct TelemetryWorker { message_receiver: mpsc::Receiver, message_sender: mpsc::Sender, - register_receiver: mpsc::UnboundedReceiver, - register_sender: mpsc::UnboundedSender, + register_receiver: TracingUnboundedReceiver, + register_sender: TracingUnboundedSender, id_counter: Arc, } @@ -163,7 +164,8 @@ impl TelemetryWorker { // error as early as possible. 
let _transport = initialize_transport()?; let (message_sender, message_receiver) = mpsc::channel(buffer_size); - let (register_sender, register_receiver) = mpsc::unbounded(); + let (register_sender, register_receiver) = + tracing_unbounded("mpsc_telemetry_register", 10_000); Ok(Self { message_receiver, @@ -360,7 +362,7 @@ impl TelemetryWorker { #[derive(Debug, Clone)] pub struct TelemetryWorkerHandle { message_sender: mpsc::Sender, - register_sender: mpsc::UnboundedSender, + register_sender: TracingUnboundedSender, id_counter: Arc, } @@ -386,7 +388,7 @@ impl TelemetryWorkerHandle { #[derive(Debug)] pub struct Telemetry { message_sender: mpsc::Sender, - register_sender: mpsc::UnboundedSender, + register_sender: TracingUnboundedSender, id: Id, connection_notifier: TelemetryConnectionNotifier, endpoints: Option, @@ -460,7 +462,7 @@ impl TelemetryHandle { /// (re-)establishes. #[derive(Clone, Debug)] pub struct TelemetryConnectionNotifier { - register_sender: mpsc::UnboundedSender, + register_sender: TracingUnboundedSender, addresses: Vec, } diff --git a/client/transaction-pool/src/graph/watcher.rs b/client/transaction-pool/src/graph/watcher.rs index 0613300c8684b..df5bb94edfe6d 100644 --- a/client/transaction-pool/src/graph/watcher.rs +++ b/client/transaction-pool/src/graph/watcher.rs @@ -62,7 +62,7 @@ impl Default for Sender { impl Sender { /// Add a new watcher to this sender object. 
pub fn new_watcher(&mut self, hash: H) -> Watcher { - let (tx, receiver) = tracing_unbounded("mpsc_txpool_watcher"); + let (tx, receiver) = tracing_unbounded("mpsc_txpool_watcher", 100_000); self.receivers.push(tx); Watcher { receiver, hash } } diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index b4b4299240a32..d8c8bea625fb3 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -291,7 +291,7 @@ where pool: Arc>, interval: Duration, ) -> (Self, Pin + Send>>) { - let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue"); + let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue", 100_000); let worker = RevalidationWorker::new(api.clone(), pool.clone()); diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml index 082ac3b55e80d..e80588453597e 100644 --- a/client/utils/Cargo.toml +++ b/client/utils/Cargo.toml @@ -10,6 +10,7 @@ description = "I/O for Substrate runtimes" readme = "README.md" [dependencies] +backtrace = "0.3.67" futures = "0.3.21" futures-timer = "3.0.2" lazy_static = "1.4.0" diff --git a/client/utils/src/mpsc.rs b/client/utils/src/mpsc.rs index ee3fba4a5ee67..d74703c4abd08 100644 --- a/client/utils/src/mpsc.rs +++ b/client/utils/src/mpsc.rs @@ -37,6 +37,7 @@ mod inner { mod inner { // tracing implementation use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; + use backtrace::Backtrace; use futures::{ channel::mpsc::{ self, SendError, TryRecvError, TrySendError, UnboundedReceiver, UnboundedSender, @@ -45,71 +46,132 @@ mod inner { stream::{FusedStream, Stream}, task::{Context, Poll}, }; - use std::pin::Pin; + use log::error; + use std::{ + pin::Pin, + sync::{ + atomic::{AtomicBool, AtomicI64, Ordering}, + Arc, + }, + }; /// Wrapper Type around `UnboundedSender` that increases the global /// measure when a message is added #[derive(Debug)] - pub struct TracingUnboundedSender(&'static str, UnboundedSender); + 
pub struct TracingUnboundedSender { + inner: UnboundedSender, + name: &'static str, + // To not bother with ordering and possible underflow errors of the unsigned counter + // we just use `i64` and `Ordering::Relaxed`, and perceive `queue_size` as approximate. + // It can turn < 0 though. + queue_size: Arc, + queue_size_warning: i64, + warning_fired: Arc, + creation_backtrace: Arc, + } // Strangely, deriving `Clone` requires that `T` is also `Clone`. impl Clone for TracingUnboundedSender { fn clone(&self) -> Self { - Self(self.0, self.1.clone()) + Self { + inner: self.inner.clone(), + name: self.name, + queue_size: self.queue_size.clone(), + queue_size_warning: self.queue_size_warning, + warning_fired: self.warning_fired.clone(), + creation_backtrace: self.creation_backtrace.clone(), + } } } /// Wrapper Type around `UnboundedReceiver` that decreases the global /// measure when a message is polled #[derive(Debug)] - pub struct TracingUnboundedReceiver(&'static str, UnboundedReceiver); + pub struct TracingUnboundedReceiver { + inner: UnboundedReceiver, + name: &'static str, + queue_size: Arc, + } /// Wrapper around `mpsc::unbounded` that tracks the in- and outflow via - /// `UNBOUNDED_CHANNELS_COUNTER` + /// `UNBOUNDED_CHANNELS_COUNTER` and warns if the message queue grows + /// above the warning threshold. 
pub fn tracing_unbounded( - key: &'static str, + name: &'static str, + queue_size_warning: i64, ) -> (TracingUnboundedSender, TracingUnboundedReceiver) { let (s, r) = mpsc::unbounded(); - (TracingUnboundedSender(key, s), TracingUnboundedReceiver(key, r)) + let queue_size = Arc::new(AtomicI64::new(0)); + let sender = TracingUnboundedSender { + inner: s, + name, + queue_size: queue_size.clone(), + queue_size_warning, + warning_fired: Arc::new(AtomicBool::new(false)), + creation_backtrace: Arc::new(Backtrace::new_unresolved()), + }; + let receiver = TracingUnboundedReceiver { inner: r, name, queue_size }; + (sender, receiver) } impl TracingUnboundedSender { /// Proxy function to mpsc::UnboundedSender pub fn poll_ready(&self, ctx: &mut Context) -> Poll> { - self.1.poll_ready(ctx) + self.inner.poll_ready(ctx) } /// Proxy function to mpsc::UnboundedSender pub fn is_closed(&self) -> bool { - self.1.is_closed() + self.inner.is_closed() } /// Proxy function to mpsc::UnboundedSender pub fn close_channel(&self) { - self.1.close_channel() + self.inner.close_channel() } /// Proxy function to mpsc::UnboundedSender pub fn disconnect(&mut self) { - self.1.disconnect() + self.inner.disconnect() } - /// Proxy function to mpsc::UnboundedSender pub fn start_send(&mut self, msg: T) -> Result<(), SendError> { - self.1.start_send(msg) + // The underlying implementation of [`UnboundedSender::start_send`] is the same as + // [`UnboundedSender::unbounded_send`], so we just reuse the message counting and + // error reporting code from `unbounded_send`. 
+ self.unbounded_send(msg).map_err(TrySendError::into_send_error) } /// Proxy function to mpsc::UnboundedSender pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { - self.1.unbounded_send(msg).map(|s| { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "send"]).inc(); + self.inner.unbounded_send(msg).map(|s| { + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, "send"]).inc(); + + let queue_size = self.queue_size.fetch_add(1, Ordering::Relaxed); + if queue_size == self.queue_size_warning && + self.warning_fired + .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed) + .is_ok() + { + // `warning_fired` and `queue_size` are not synchronized, so it's possible + // that the warning is fired few times before the `warning_fired` is seen + // by all threads. This seems better than introducing a mutex guarding them. + let mut backtrace = (*self.creation_backtrace).clone(); + backtrace.resolve(); + error!( + "The number of unprocessed messages in channel `{}` reached {}.\n\ + The channel was created at:\n{:?}", + self.name, self.queue_size_warning, backtrace, + ); + } + s }) } /// Proxy function to mpsc::UnboundedSender pub fn same_receiver(&self, other: &UnboundedSender) -> bool { - self.1.same_receiver(other) + self.inner.same_receiver(other) } } @@ -118,7 +180,7 @@ mod inner { // consume all items, make sure to reflect the updated count let mut count = 0; loop { - if self.1.is_terminated() { + if self.inner.is_terminated() { break } @@ -129,7 +191,9 @@ mod inner { } // and discount the messages if count > 0 { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "dropped"]).inc_by(count); + UNBOUNDED_CHANNELS_COUNTER + .with_label_values(&[self.name, "dropped"]) + .inc_by(count); } } @@ -137,15 +201,16 @@ mod inner { /// that consumes all messages first and updates the counter pub fn close(&mut self) { self.consume(); - self.1.close() + self.inner.close() } /// Proxy function to mpsc::UnboundedReceiver /// that discounts 
the messages taken out pub fn try_next(&mut self) -> Result, TryRecvError> { - self.1.try_next().map(|s| { + self.inner.try_next().map(|s| { if s.is_some() { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "received"]).inc(); + let _ = self.queue_size.fetch_sub(1, Ordering::Relaxed); + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, "received"]).inc(); } s }) @@ -165,10 +230,11 @@ mod inner { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let s = self.get_mut(); - match Pin::new(&mut s.1).poll_next(cx) { + match Pin::new(&mut s.inner).poll_next(cx) { Poll::Ready(msg) => { if msg.is_some() { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[s.0, "received"]).inc(); + let _ = s.queue_size.fetch_sub(1, Ordering::Relaxed); + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[s.name, "received"]).inc(); } Poll::Ready(msg) }, @@ -179,7 +245,7 @@ mod inner { impl FusedStream for TracingUnboundedReceiver { fn is_terminated(&self) -> bool { - self.1.is_terminated() + self.inner.is_terminated() } } @@ -223,6 +289,10 @@ mod inner { } fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + // The difference with `TracingUnboundedSender` is intentional. The underlying + // implementation differs for `UnboundedSender` and `&UnboundedSender`: + // the latter closes the channel completely with `close_channel()`, while the former + // only closes this specific sender with `disconnect()`. self.close_channel(); Poll::Ready(Ok(())) } diff --git a/client/utils/src/notification.rs b/client/utils/src/notification.rs index ff527c343f9f2..4917a43265df4 100644 --- a/client/utils/src/notification.rs +++ b/client/utils/src/notification.rs @@ -79,8 +79,8 @@ impl NotificationStream { } /// Subscribe to a channel through which the generic payload can be received. 
- pub fn subscribe(&self) -> NotificationReceiver { - let receiver = self.hub.subscribe(()); + pub fn subscribe(&self, queue_size_warning: i64) -> NotificationReceiver { + let receiver = self.hub.subscribe((), queue_size_warning); NotificationReceiver { receiver } } } diff --git a/client/utils/src/notification/tests.rs b/client/utils/src/notification/tests.rs index a001fa7e89e95..f813f37d29ddb 100644 --- a/client/utils/src/notification/tests.rs +++ b/client/utils/src/notification/tests.rs @@ -36,7 +36,7 @@ fn notification_channel_simple() { // Create a future to receive a single notification // from the stream and verify its payload. - let future = stream.subscribe().take(1).for_each(move |payload| { + let future = stream.subscribe(100_000).take(1).for_each(move |payload| { let test_payload = closure_payload.clone(); async move { assert_eq!(payload, test_payload); diff --git a/client/utils/src/pubsub.rs b/client/utils/src/pubsub.rs index ba6e9ddc6ca2a..f85f44b498841 100644 --- a/client/utils/src/pubsub.rs +++ b/client/utils/src/pubsub.rs @@ -164,7 +164,7 @@ impl Hub { /// Subscribe to this Hub using the `subs_key: K`. /// /// A subscription with a key `K` is possible if the Registry implements `Subscribe`. - pub fn subscribe(&self, subs_key: K) -> Receiver + pub fn subscribe(&self, subs_key: K, queue_size_warning: i64) -> Receiver where R: Subscribe + Unsubscribe, { @@ -178,7 +178,7 @@ impl Hub { // have the sink disposed. shared_borrowed.registry.subscribe(subs_key, subs_id); - let (tx, rx) = crate::mpsc::tracing_unbounded(self.tracing_key); + let (tx, rx) = crate::mpsc::tracing_unbounded(self.tracing_key, queue_size_warning); assert!(shared_borrowed.sinks.insert(subs_id, tx).is_none(), "Used IDSequence to create another ID. Should be unique until u64 is overflowed. 
Should be unique."); Receiver { shared: Arc::downgrade(&self.shared), subs_id, rx } diff --git a/client/utils/src/pubsub/tests/normal_operation.rs b/client/utils/src/pubsub/tests/normal_operation.rs index a13c718d74a8f..830388de32e46 100644 --- a/client/utils/src/pubsub/tests/normal_operation.rs +++ b/client/utils/src/pubsub/tests/normal_operation.rs @@ -27,7 +27,7 @@ fn positive_rx_receives_relevant_messages_and_terminates_upon_hub_drop() { // No subscribers yet. That message is not supposed to get to anyone. hub.send(0); - let mut rx_01 = hub.subscribe(SubsKey::new()); + let mut rx_01 = hub.subscribe(SubsKey::new(), 100_000); assert_eq!(hub.subs_count(), 1); // That message is sent after subscription. Should be delivered into rx_01. @@ -49,9 +49,9 @@ fn positive_subs_count_is_correct_upon_drop_of_rxs() { let hub = TestHub::new(TK); assert_eq!(hub.subs_count(), 0); - let rx_01 = hub.subscribe(SubsKey::new()); + let rx_01 = hub.subscribe(SubsKey::new(), 100_000); assert_eq!(hub.subs_count(), 1); - let rx_02 = hub.subscribe(SubsKey::new()); + let rx_02 = hub.subscribe(SubsKey::new(), 100_000); assert_eq!(hub.subs_count(), 2); std::mem::drop(rx_01); @@ -69,11 +69,11 @@ fn positive_subs_count_is_correct_upon_drop_of_rxs_on_cloned_hubs() { assert_eq!(hub_01.subs_count(), 0); assert_eq!(hub_02.subs_count(), 0); - let rx_01 = hub_02.subscribe(SubsKey::new()); + let rx_01 = hub_02.subscribe(SubsKey::new(), 100_000); assert_eq!(hub_01.subs_count(), 1); assert_eq!(hub_02.subs_count(), 1); - let rx_02 = hub_02.subscribe(SubsKey::new()); + let rx_02 = hub_02.subscribe(SubsKey::new(), 100_000); assert_eq!(hub_01.subs_count(), 2); assert_eq!(hub_02.subs_count(), 2); diff --git a/client/utils/src/pubsub/tests/panicking_registry.rs b/client/utils/src/pubsub/tests/panicking_registry.rs index 26ce63bd51b01..cfe8168d80229 100644 --- a/client/utils/src/pubsub/tests/panicking_registry.rs +++ b/client/utils/src/pubsub/tests/panicking_registry.rs @@ -30,7 +30,7 @@ fn t01() { let hub = 
TestHub::new(TK); assert_hub_props(&hub, 0, 0); - let rx_01 = hub.subscribe(SubsKey::new()); + let rx_01 = hub.subscribe(SubsKey::new(), 100_000); assert_hub_props(&hub, 1, 1); std::mem::drop(rx_01); @@ -45,17 +45,17 @@ fn t02() { assert_hub_props(&hub, 0, 0); // Subscribe rx-01 - let rx_01 = hub.subscribe(SubsKey::new()); + let rx_01 = hub.subscribe(SubsKey::new(), 100_000); assert_hub_props(&hub, 1, 1); // Subscribe rx-02 so that its unsubscription will lead to an attempt to drop rx-01 in the // middle of unsubscription of rx-02 - let rx_02 = hub.subscribe(SubsKey::new().with_receiver(rx_01)); + let rx_02 = hub.subscribe(SubsKey::new().with_receiver(rx_01), 100_000); assert_hub_props(&hub, 2, 2); // Subscribe rx-03 in order to see that it will receive messages after the unclean // unsubscription - let mut rx_03 = hub.subscribe(SubsKey::new()); + let mut rx_03 = hub.subscribe(SubsKey::new(), 100_000); assert_hub_props(&hub, 3, 3); // drop rx-02 leads to an attempt to unsubscribe rx-01 @@ -69,7 +69,7 @@ fn t02() { // Subscribe rx-04 in order to see that it will receive messages after the unclean // unsubscription - let mut rx_04 = hub.subscribe(SubsKey::new()); + let mut rx_04 = hub.subscribe(SubsKey::new(), 100_000); assert_hub_props(&hub, 3, 3); hub.send(2); @@ -96,8 +96,8 @@ fn t02() { } async fn add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(hub: &TestHub) { - let rx_01 = hub.subscribe(SubsKey::new()); - let rx_02 = hub.subscribe(SubsKey::new()); + let rx_01 = hub.subscribe(SubsKey::new(), 100_000); + let rx_02 = hub.subscribe(SubsKey::new(), 100_000); hub.send(1); hub.send(2); @@ -121,9 +121,8 @@ fn t03() { add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; assert_hub_props(&hub, 0, 0); - assert!(catch_unwind(AssertUnwindSafe( - || hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnSubscribePanicBefore)) - )) + assert!(catch_unwind(AssertUnwindSafe(|| hub + 
.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnSubscribePanicBefore), 100_000))) .is_err()); assert_hub_props(&hub, 0, 0); @@ -141,9 +140,8 @@ fn t04() { add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; assert_hub_props(&hub, 0, 0); - assert!(catch_unwind(AssertUnwindSafe( - || hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnSubscribePanicAfter)) - )) + assert!(catch_unwind(AssertUnwindSafe(|| hub + .subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnSubscribePanicAfter), 100_000))) .is_err()); // the registry has panicked after it has added a subs-id into its internal storage — the @@ -163,8 +161,8 @@ fn t05() { add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; assert_hub_props(&hub, 0, 0); - let rx_01 = - hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnUnsubscribePanicBefore)); + let rx_01 = hub + .subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnUnsubscribePanicBefore), 100_000); assert_hub_props(&hub, 1, 1); add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; @@ -189,7 +187,8 @@ fn t06() { add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; assert_hub_props(&hub, 0, 0); - let rx_01 = hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnUnsubscribePanicAfter)); + let rx_01 = hub + .subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnUnsubscribePanicAfter), 100_000); assert_hub_props(&hub, 1, 1); add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; @@ -214,7 +213,8 @@ fn t07() { add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; assert_hub_props(&hub, 0, 0); - let rx_01 = hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnDispatchPanicBefore)); + let rx_01 = + hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnDispatchPanicBefore), 100_000); assert_hub_props(&hub, 1, 1); assert!(catch_unwind(AssertUnwindSafe(|| hub.send(1))).is_err()); 
assert_hub_props(&hub, 1, 1); @@ -235,7 +235,8 @@ fn t08() { add_some_subscribers_see_that_messages_are_delivered_and_unsubscribe(&hub).await; assert_hub_props(&hub, 0, 0); - let rx_01 = hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnDispatchPanicAfter)); + let rx_01 = + hub.subscribe(SubsKey::new().with_panic(SubsKeyPanic::OnDispatchPanicAfter), 100_000); assert_hub_props(&hub, 1, 1); assert!(catch_unwind(AssertUnwindSafe(|| hub.send(1))).is_err()); assert_hub_props(&hub, 1, 1); diff --git a/client/utils/src/status_sinks.rs b/client/utils/src/status_sinks.rs index a1d965d08085e..c536e2c18c6a1 100644 --- a/client/utils/src/status_sinks.rs +++ b/client/utils/src/status_sinks.rs @@ -58,7 +58,7 @@ impl Default for StatusSinks { impl StatusSinks { /// Builds a new empty collection. pub fn new() -> StatusSinks { - let (entries_tx, entries_rx) = tracing_unbounded("status-sinks-entries"); + let (entries_tx, entries_rx) = tracing_unbounded("status-sinks-entries", 100_000); StatusSinks { inner: Mutex::new(Inner { entries: stream::FuturesUnordered::new(), entries_rx }), @@ -196,7 +196,7 @@ mod tests { let status_sinks = StatusSinks::new(); - let (tx, rx) = tracing_unbounded("test"); + let (tx, rx) = tracing_unbounded("test", 100_000); status_sinks.push(Duration::from_millis(100), tx); let mut val_order = 5; diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index 635e22c5d58aa..b19fbd57c5030 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -153,7 +153,15 @@ impl Pallet { /// /// The storage will be applied immediately. /// And aura consensus log will be appended to block's log. + /// + /// This is a no-op if `new` is empty. 
pub fn change_authorities(new: BoundedVec) { + if new.is_empty() { + log::warn!(target: LOG_TARGET, "Ignoring empty authority change."); + + return + } + >::put(&new); let log = DigestItem::Consensus( diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 16b2b2119793a..eb87d65b0549f 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -572,6 +572,8 @@ impl Pallet { /// /// Typically, this is not handled directly by the user, but by higher-level validator-set /// manager logic like `pallet-session`. + /// + /// This doesn't do anything if `authorities` is empty. pub fn enact_epoch_change( authorities: WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, next_authorities: WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, @@ -580,10 +582,25 @@ impl Pallet { // by the session module to be called before this. debug_assert!(Self::initialized().is_some()); - // Update epoch index - let epoch_index = EpochIndex::::get() - .checked_add(1) - .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + if authorities.is_empty() { + log::warn!(target: LOG_TARGET, "Ignoring empty epoch change."); + + return + } + + // Update epoch index. + // + // NOTE: we figure out the epoch index from the slot, which may not + // necessarily be contiguous if the chain was offline for more than + // `T::EpochDuration` slots. When skipping from epoch N to e.g. N+4, we + // will be using the randomness and authorities for that epoch that had + // been previously announced for epoch N+1, and the randomness collected + // during the current epoch (N) will be used for epoch N+5. + let epoch_index = sp_consensus_babe::epoch_index( + CurrentSlot::::get(), + GenesisSlot::::get(), + T::EpochDuration::get(), + ); EpochIndex::::put(epoch_index); Authorities::::put(authorities); @@ -630,11 +647,16 @@ impl Pallet { } } - /// Finds the start slot of the current epoch. 
only guaranteed to - /// give correct results after `initialize` of the first block - /// in the chain (as its result is based off of `GenesisSlot`). + /// Finds the start slot of the current epoch. + /// + /// Only guaranteed to give correct results after `initialize` of the first + /// block in the chain (as its result is based off of `GenesisSlot`). pub fn current_epoch_start() -> Slot { - Self::epoch_start(EpochIndex::::get()) + sp_consensus_babe::epoch_start_slot( + EpochIndex::::get(), + GenesisSlot::::get(), + T::EpochDuration::get(), + ) } /// Produces information about the current epoch. @@ -658,9 +680,15 @@ impl Pallet { if u64 is not enough we should crash for safety; qed.", ); + let start_slot = sp_consensus_babe::epoch_start_slot( + next_epoch_index, + GenesisSlot::::get(), + T::EpochDuration::get(), + ); + Epoch { epoch_index: next_epoch_index, - start_slot: Self::epoch_start(next_epoch_index), + start_slot, duration: T::EpochDuration::get(), authorities: NextAuthorities::::get().to_vec(), randomness: NextRandomness::::get(), @@ -672,17 +700,6 @@ impl Pallet { } } - fn epoch_start(epoch_index: u64) -> Slot { - // (epoch_index * epoch_duration) + genesis_slot - - const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ - if u64 is not enough we should crash for safety; qed."; - - let epoch_start = epoch_index.checked_mul(T::EpochDuration::get()).expect(PROOF); - - epoch_start.checked_add(*GenesisSlot::::get()).expect(PROOF).into() - } - fn deposit_consensus(new: U) { let log = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); >::deposit_log(log) diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 204de8aae172e..dbeb588c1830e 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -195,7 +195,7 @@ impl pallet_staking::Config for Test { type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = 
frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index d4132e6378540..0b8a02547144b 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -948,3 +948,34 @@ fn generate_equivocation_report_blob() { println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); }); } + +#[test] +fn skipping_over_epochs_works() { + let mut ext = new_test_ext(3); + + ext.execute_with(|| { + let epoch_duration: u64 = ::EpochDuration::get(); + + // this sets the genesis slot to 100; + let genesis_slot = 100; + go_to_block(1, genesis_slot); + + // we will author all blocks from epoch #0 and arrive at a point where + // we are in epoch #1. we should already have the randomness ready that + // will be used in epoch #2 + progress_to_block(epoch_duration + 1); + assert_eq!(EpochIndex::::get(), 1); + + // genesis randomness is an array of zeros + let randomness_for_epoch_2 = NextRandomness::::get(); + assert!(randomness_for_epoch_2 != [0; 32]); + + // we will now create a block for a slot that is part of epoch #4. 
+ // we should appropriately increment the epoch index as well as re-use + // the randomness from epoch #2 on epoch #4 + go_to_block(System::block_number() + 1, genesis_slot + epoch_duration * 4); + + assert_eq!(EpochIndex::::get(), 4); + assert_eq!(Randomness::::get(), randomness_for_epoch_2); + }); +} diff --git a/frame/balances/src/migration.rs b/frame/balances/src/migration.rs index 08e1d8c7a2c74..b660ec9fd3235 100644 --- a/frame/balances/src/migration.rs +++ b/frame/balances/src/migration.rs @@ -69,3 +69,29 @@ impl, A: Get>, I: 'static> OnRuntimeUpgrade migrate_v0_to_v1::(&A::get()) } } + +pub struct ResetInactive(PhantomData<(T, I)>); +impl, I: 'static> OnRuntimeUpgrade for ResetInactive { + fn on_runtime_upgrade() -> Weight { + let onchain_version = Pallet::::on_chain_storage_version(); + + if onchain_version == 1 { + // Remove the old `StorageVersion` type. + frame_support::storage::unhashed::kill(&frame_support::storage::storage_prefix( + Pallet::::name().as_bytes(), + "StorageVersion".as_bytes(), + )); + + InactiveIssuance::::kill(); + + // Set storage version to `0`. + StorageVersion::new(0).put::>(); + + log::info!(target: "runtime::balances", "Storage to version 0"); + T::DbWeight::get().reads_writes(1, 2) + } else { + log::info!(target: "runtime::balances", "Migration did not execute. This probably should be removed"); + T::DbWeight::get().reads(1) + } + } +} diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f4dc3a0ec0250..df1dd24856a8a 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -279,7 +279,7 @@ pub trait Ext: sealing::Sealed { /// when the code is executing on-chain. /// /// Returns `true` if debug message recording is enabled. Otherwise `false` is returned. - fn append_debug_buffer(&mut self, msg: &str) -> Result; + fn append_debug_buffer(&mut self, msg: &str) -> bool; /// Call some dispatchable and return the result. 
fn call_runtime(&self, call: ::RuntimeCall) -> DispatchResultWithPostInfo; @@ -1334,16 +1334,34 @@ where &mut self.top_frame_mut().nested_gas } - fn append_debug_buffer(&mut self, msg: &str) -> Result { + fn append_debug_buffer(&mut self, msg: &str) -> bool { if let Some(buffer) = &mut self.debug_message { - if !msg.is_empty() { - buffer - .try_extend(&mut msg.bytes()) - .map_err(|_| Error::::DebugBufferExhausted)?; - } - Ok(true) + let mut msg = msg.bytes(); + let num_drain = { + let capacity = DebugBufferVec::::bound().checked_sub(buffer.len()).expect( + " + `buffer` is of type `DebugBufferVec`, + `DebugBufferVec` is a `BoundedVec`, + `BoundedVec::len()` <= `BoundedVec::bound()`; + qed + ", + ); + msg.len().saturating_sub(capacity).min(buffer.len()) + }; + buffer.drain(0..num_drain); + buffer + .try_extend(&mut msg) + .map_err(|_| { + log::debug!( + target: "runtime::contracts", + "Debug message to big (size={}) for debug buffer (bound={})", + msg.len(), DebugBufferVec::::bound(), + ); + }) + .ok(); + true } else { - Ok(false) + false } } @@ -2511,12 +2529,8 @@ mod tests { #[test] fn printing_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { - ctx.ext - .append_debug_buffer("This is a test") - .expect("Maximum allowed debug buffer size exhausted!"); - ctx.ext - .append_debug_buffer("More text") - .expect("Maximum allowed debug buffer size exhausted!"); + ctx.ext.append_debug_buffer("This is a test"); + ctx.ext.append_debug_buffer("More text"); exec_success() }); @@ -2549,12 +2563,8 @@ mod tests { #[test] fn printing_works_on_fail() { let code_hash = MockLoader::insert(Call, |ctx, _| { - ctx.ext - .append_debug_buffer("This is a test") - .expect("Maximum allowed debug buffer size exhausted!"); - ctx.ext - .append_debug_buffer("More text") - .expect("Maximum allowed debug buffer size exhausted!"); + ctx.ext.append_debug_buffer("This is a test"); + ctx.ext.append_debug_buffer("More text"); exec_trapped() }); @@ -2587,7 +2597,7 @@ mod tests { #[test] fn 
debug_buffer_is_limited() { let code_hash = MockLoader::insert(Call, move |ctx, _| { - ctx.ext.append_debug_buffer("overflowing bytes")?; + ctx.ext.append_debug_buffer("overflowing bytes"); exec_success() }); @@ -2602,20 +2612,22 @@ mod tests { set_balance(&ALICE, min_balance * 10); place_contract(&BOB, code_hash); let mut storage_meter = storage::meter::Meter::new(&ALICE, Some(0), 0).unwrap(); - assert_err!( - MockStack::run_call( - ALICE, - BOB, - &mut gas_meter, - &mut storage_meter, - &schedule, - 0, - vec![], - Some(&mut debug_buffer), - Determinism::Deterministic, - ) - .map_err(|e| e.error), - Error::::DebugBufferExhausted + MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &mut storage_meter, + &schedule, + 0, + vec![], + Some(&mut debug_buffer), + Determinism::Deterministic, + ) + .unwrap(); + assert_eq!( + &String::from_utf8(debug_buffer[DebugBufferVec::::bound() - 17..].to_vec()) + .unwrap(), + "overflowing bytes" ); }); } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 39626e43f4653..778ceec961500 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -857,9 +857,6 @@ pub mod pallet { CodeRejected, /// An indetermistic code was used in a context where this is not permitted. Indeterministic, - /// The debug buffer size used during contract execution exceeded the limit determined by - /// the `MaxDebugBufferLen` pallet config parameter. - DebugBufferExhausted, } /// A mapping from an original code hash to the original code, untouched by instrumentation. 
diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 903ca0abb5e06..553bae59e78b9 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -588,9 +588,9 @@ mod tests { fn gas_meter(&mut self) -> &mut GasMeter { &mut self.gas_meter } - fn append_debug_buffer(&mut self, msg: &str) -> Result { + fn append_debug_buffer(&mut self, msg: &str) -> bool { self.debug_buffer.extend(msg.as_bytes()); - Ok(true) + true } fn call_runtime( &self, diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index aaacb9e5f80e6..7d67adc3ded33 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -2380,11 +2380,11 @@ pub mod env { str_len: u32, ) -> Result { ctx.charge_gas(RuntimeCosts::DebugMessage)?; - if ctx.ext.append_debug_buffer("")? { + if ctx.ext.append_debug_buffer("") { let data = ctx.read_sandbox_memory(memory, str_ptr, str_len)?; let msg = core::str::from_utf8(&data).map_err(|_| >::DebugMessageInvalidUTF8)?; - ctx.ext.append_debug_buffer(msg)?; + ctx.ext.append_debug_buffer(msg); return Ok(ReturnCode::Success) } Ok(ReturnCode::LoggingDisabled) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 7f226826cbc53..f2faeebc13478 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -91,7 +91,7 @@ pub mod pallet { DispatchResult, }; use sp_staking::{EraIndex, StakingInterface}; - use sp_std::{prelude::*, vec::Vec}; + use sp_std::{collections::btree_set::BTreeSet, prelude::*, vec::Vec}; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] @@ -429,9 +429,9 @@ pub mod pallet { } }; - let check_stash = |stash, deposit, eras_checked: &mut u32| { + let check_stash = |stash, deposit, eras_checked: &mut BTreeSet| { let is_exposed = unchecked_eras_to_check.iter().any(|e| { - eras_checked.saturating_inc(); + eras_checked.insert(*e); 
T::Staking::is_exposed_in_era(&stash, e) }); @@ -452,7 +452,7 @@ pub mod pallet { ::WeightInfo::on_idle_unstake() } else { // eras checked so far. - let mut eras_checked = 0u32; + let mut eras_checked = BTreeSet::::new(); let pre_length = stashes.len(); let stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize> = stashes @@ -468,7 +468,7 @@ pub mod pallet { log!( debug, "checked {:?} eras, pre stashes: {:?}, post: {:?}", - eras_checked, + eras_checked.len(), pre_length, post_length, ); @@ -489,7 +489,9 @@ pub mod pallet { }, } - ::WeightInfo::on_idle_check(validator_count * eras_checked) + ::WeightInfo::on_idle_check( + validator_count * eras_checked.len() as u32, + ) } } } diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index b67dcf581ed97..3f974e5e1a9d6 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -140,7 +140,7 @@ impl pallet_staking::Config for Runtime { type Reward = (); type SessionsPerEra = (); type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 1a97b1345fe5d..131f6cafcd179 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -199,7 +199,7 @@ impl pallet_staking::Config for Test { type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/nfts/Cargo.toml b/frame/nfts/Cargo.toml new file mode 100644 index 0000000000000..109dffdd10f50 --- /dev/null +++ b/frame/nfts/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "pallet-nfts" 
+version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME NFTs pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +enumflags2 = { version = "0.7.5" } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-io = { version = "7.0.0", path = "../../primitives/io" } +sp-std = { version = "5.0.0", path = "../../primitives/std" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nfts/README.md b/frame/nfts/README.md new file mode 100644 index 0000000000000..7de4b9440e7f5 --- /dev/null +++ b/frame/nfts/README.md @@ -0,0 +1,106 @@ +# NFTs pallet + +A pallet for dealing with non-fungible assets. 
+ +## Overview + +The NFTs pallet provides functionality for non-fungible tokens' management, including: + +* Collection Creation +* NFT Minting +* NFT Transfers and Atomic Swaps +* NFT Trading methods +* Attributes Management +* NFT Burning + +To use it in your runtime, you need to implement [`nfts::Config`](https://paritytech.github.io/substrate/master/pallet_nfts/pallet/trait.Config.html). + +The supported dispatchable functions are documented in the [`nfts::Call`](https://paritytech.github.io/substrate/master/pallet_nfts/pallet/enum.Call.html) enum. + +### Terminology + +* **Collection creation:** The creation of a new collection. +* **NFT minting:** The action of creating a new item within a collection. +* **NFT transfer:** The action of sending an item from one account to another. +* **Atomic swap:** The action of exchanging items between accounts without needing a 3rd party service. +* **NFT burning:** The destruction of an item. +* **Non-fungible token (NFT):** An item for which each unit has unique characteristics. There is exactly + one instance of such an item in existence and there is exactly one owning account (though that owning account could be a proxy account or multi-sig account). +* **Soul Bound NFT:** An item that is non-transferable from the account which it is minted into. + +### Goals + +The NFTs pallet in Substrate is designed to make the following possible: + +* Allow accounts to permissionlessly create nft collections. +* Allow a named (permissioned) account to mint and burn unique items within a collection. +* Move items between accounts permissionlessly. +* Allow a named (permissioned) account to freeze and unfreeze items within a + collection or the entire collection. +* Allow the owner of an item to delegate the ability to transfer the item to some + named third-party. +* Allow third-parties to store information in an NFT _without_ owning it (Eg. save game state). 
+ +## Interface + +### Permissionless dispatchables + +* `create`: Create a new collection by placing a deposit. +* `mint`: Mint a new item within a collection (when the minting is public). +* `transfer`: Send an item to a new owner. +* `redeposit`: Update the deposit amount of an item, potentially freeing funds. +* `approve_transfer`: Name a delegate who may authorize a transfer. +* `cancel_approval`: Revert the effects of a previous `approve_transfer`. +* `approve_item_attributes`: Name a delegate who may change item's attributes within a namespace. +* `cancel_item_attributes_approval`: Revert the effects of a previous `approve_item_attributes`. +* `set_price`: Set the price for an item. +* `buy_item`: Buy an item. +* `pay_tips`: Pay tips, could be used for paying the creator royalties. +* `create_swap`: Create an offer to swap an NFT for another NFT and optionally some fungibles. +* `cancel_swap`: Cancel previously created swap offer. +* `claim_swap`: Swap items in an atomic way. + + +### Permissioned dispatchables + +* `destroy`: Destroy a collection. This destroys all the items inside the collection and refunds the deposit. +* `force_mint`: Mint a new item within a collection. +* `burn`: Destroy an item within a collection. +* `lock_item_transfer`: Prevent an individual item from being transferred. +* `unlock_item_transfer`: Revert the effects of a previous `lock_item_transfer`. +* `clear_all_transfer_approvals`: Clears all transfer approvals set by calling the `approve_transfer`. +* `lock_collection`: Prevent all items within a collection from being transferred (making them all `soul bound`). +* `lock_item_properties`: Lock item's metadata or attributes. +* `transfer_ownership`: Alter the owner of a collection, moving all associated deposits. (Ownership of individual items will not be affected.) +* `set_team`: Alter the permissioned accounts of a collection. +* `set_collection_max_supply`: Change the max supply of a collection. 
+* `update_mint_settings`: Update the minting settings for collection. + + +### Metadata (permissioned) dispatchables + +* `set_attribute`: Set a metadata attribute of an item or collection. +* `clear_attribute`: Remove a metadata attribute of an item or collection. +* `set_metadata`: Set general metadata of an item (E.g. an IPFS address of an image url). +* `clear_metadata`: Remove general metadata of an item. +* `set_collection_metadata`: Set general metadata of a collection. +* `clear_collection_metadata`: Remove general metadata of a collection. + + +### Force (i.e. governance) dispatchables + +* `force_create`: Create a new collection (the collection id can not be chosen). +* `force_collection_owner`: Change collection's owner. +* `force_collection_config`: Change collection's config. +* `force_set_attribute`: Set an attribute. + +Please refer to the [`Call`](https://paritytech.github.io/substrate/master/pallet_nfts/pallet/enum.Call.html) enum +and its associated variants for documentation on each function. + +## Related Modules + +* [`System`](https://docs.rs/frame-system/latest/frame_system/) +* [`Support`](https://docs.rs/frame-support/latest/frame_support/) +* [`Assets`](https://docs.rs/pallet-assets/latest/pallet_assets/) + +License: Apache-2.0 diff --git a/frame/nfts/src/benchmarking.rs b/frame/nfts/src/benchmarking.rs new file mode 100644 index 0000000000000..6517445da672d --- /dev/null +++ b/frame/nfts/src/benchmarking.rs @@ -0,0 +1,718 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Nfts pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use enumflags2::{BitFlag, BitFlags}; +use frame_benchmarking::{ + account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, +}; +use frame_support::{ + assert_ok, + dispatch::UnfilteredDispatchable, + traits::{EnsureOrigin, Get}, + BoundedVec, +}; +use frame_system::RawOrigin as SystemOrigin; +use sp_runtime::traits::{Bounded, One}; +use sp_std::prelude::*; + +use crate::Pallet as Nfts; + +const SEED: u32 = 0; + +fn create_collection, I: 'static>( +) -> (T::CollectionId, T::AccountId, AccountIdLookupOf) { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let collection = T::Helper::collection(0); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + assert_ok!(Nfts::::force_create( + SystemOrigin::Root.into(), + caller_lookup.clone(), + default_collection_config::() + )); + (collection, caller, caller_lookup) +} + +fn add_collection_metadata, I: 'static>() -> (T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert_ok!(Nfts::::set_collection_metadata( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + vec![0; T::StringLimit::get() as usize].try_into().unwrap(), + )); + (caller, caller_lookup) +} + +fn mint_item, I: 'static>( + index: 
u16, +) -> (T::ItemId, T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let item = T::Helper::item(index); + assert_ok!(Nfts::::mint( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + item, + caller_lookup.clone(), + None, + )); + (item, caller, caller_lookup) +} + +fn add_item_metadata, I: 'static>( + item: T::ItemId, +) -> (T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert_ok!(Nfts::::set_metadata( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + item, + vec![0; T::StringLimit::get() as usize].try_into().unwrap(), + )); + (caller, caller_lookup) +} + +fn add_item_attribute, I: 'static>( + item: T::ItemId, +) -> (BoundedVec, T::AccountId, AccountIdLookupOf) { + let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let key: BoundedVec<_, _> = vec![0; T::KeyLimit::get() as usize].try_into().unwrap(); + assert_ok!(Nfts::::set_attribute( + SystemOrigin::Signed(caller.clone()).into(), + T::Helper::collection(0), + Some(item), + AttributeNamespace::CollectionOwner, + key.clone(), + vec![0; T::ValueLimit::get() as usize].try_into().unwrap(), + )); + (key, caller, caller_lookup) +} + +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +fn make_collection_config, I: 'static>( + disable_settings: BitFlags, +) -> CollectionConfigFor { + CollectionConfig { + settings: CollectionSettings::from_disabled(disable_settings), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn default_collection_config, I: 'static>() -> CollectionConfigFor { + make_collection_config::(CollectionSetting::empty()) +} + +fn default_item_config() -> ItemConfig { + ItemConfig { settings: ItemSettings::all_enabled() } +} + +benchmarks_instance_pallet! { + create { + let collection = T::Helper::collection(0); + let origin = T::CreateOrigin::successful_origin(&collection); + let caller = T::CreateOrigin::ensure_origin(origin.clone(), &collection).unwrap(); + whitelist_account!(caller); + let admin = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let call = Call::::create { admin, config: default_collection_config::() }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); + } + + force_create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + }: _(SystemOrigin::Root, caller_lookup, default_collection_config::()) + verify { + assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); + } + + destroy { + let n in 0 .. 1_000; + let m in 0 .. 1_000; + let a in 0 .. 
1_000; + + let (collection, caller, _) = create_collection::(); + add_collection_metadata::(); + for i in 0..n { + mint_item::(i as u16); + } + for i in 0..m { + if !Item::::contains_key(collection, T::Helper::item(i as u16)) { + mint_item::(i as u16); + } + add_item_metadata::(T::Helper::item(i as u16)); + } + for i in 0..a { + if !Item::::contains_key(collection, T::Helper::item(i as u16)) { + mint_item::(i as u16); + } + add_item_attribute::(T::Helper::item(i as u16)); + } + let witness = Collection::::get(collection).unwrap().destroy_witness(); + }: _(SystemOrigin::Signed(caller), collection, witness) + verify { + assert_last_event::(Event::Destroyed { collection }.into()); + } + + mint { + let (collection, caller, caller_lookup) = create_collection::(); + let item = T::Helper::item(0); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup, None) + verify { + assert_last_event::(Event::Issued { collection, item, owner: caller }.into()); + } + + force_mint { + let (collection, caller, caller_lookup) = create_collection::(); + let item = T::Helper::item(0); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, caller_lookup, default_item_config()) + verify { + assert_last_event::(Event::Issued { collection, item, owner: caller }.into()); + } + + burn { + let (collection, caller, caller_lookup) = create_collection::(); + let (item, ..) = mint_item::(0); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(caller_lookup)) + verify { + assert_last_event::(Event::Burned { collection, item, owner: caller }.into()); + } + + transfer { + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, target_lookup) + verify { + assert_last_event::(Event::Transferred { collection, item, from: caller, to: target }.into()); + } + + redeposit { + let i in 0 .. 5_000; + let (collection, caller, _) = create_collection::(); + let items = (0..i).map(|x| mint_item::(x as u16).0).collect::>(); + Nfts::::force_collection_config( + SystemOrigin::Root.into(), + collection, + make_collection_config::(CollectionSetting::DepositRequired.into()), + )?; + }: _(SystemOrigin::Signed(caller.clone()), collection, items.clone()) + verify { + assert_last_event::(Event::Redeposited { collection, successful_items: items }.into()); + } + + lock_item_transfer { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + }: _(SystemOrigin::Signed(caller.clone()), T::Helper::collection(0), T::Helper::item(0)) + verify { + assert_last_event::(Event::ItemTransferLocked { collection: T::Helper::collection(0), item: T::Helper::item(0) }.into()); + } + + unlock_item_transfer { + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + Nfts::::lock_item_transfer( + SystemOrigin::Signed(caller.clone()).into(), + collection, + item, + )?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item) + verify { + assert_last_event::(Event::ItemTransferUnlocked { collection, item }.into()); + } + + lock_collection { + let (collection, caller, _) = create_collection::(); + let lock_settings = CollectionSettings::from_disabled( + CollectionSetting::TransferableItems | + CollectionSetting::UnlockedMetadata | + CollectionSetting::UnlockedAttributes | + CollectionSetting::UnlockedMaxSupply, + ); + }: _(SystemOrigin::Signed(caller.clone()), collection, lock_settings) + verify { + assert_last_event::(Event::CollectionLocked { collection }.into()); + } + + transfer_ownership { + let (collection, caller, _) = create_collection::(); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + let origin = SystemOrigin::Signed(target.clone()).into(); + Nfts::::set_accept_ownership(origin, Some(collection))?; + }: _(SystemOrigin::Signed(caller), collection, target_lookup) + verify { + assert_last_event::(Event::OwnerChanged { collection, new_owner: target }.into()); + } + + set_team { + let (collection, caller, _) = create_collection::(); + let target0 = T::Lookup::unlookup(account("target", 0, SEED)); + let target1 = T::Lookup::unlookup(account("target", 1, SEED)); + let target2 = T::Lookup::unlookup(account("target", 2, SEED)); + }: _(SystemOrigin::Signed(caller), collection, target0, target1, target2) + verify { + assert_last_event::(Event::TeamChanged{ + collection, + issuer: account("target", 0, SEED), + admin: account("target", 1, SEED), + freezer: account("target", 2, SEED), + }.into()); + } + + force_collection_owner { + let (collection, _, _) = create_collection::(); + let origin = T::ForceOrigin::successful_origin(); + let target: T::AccountId = 
account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + let call = Call::::force_collection_owner { + collection, + owner: target_lookup, + }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_last_event::(Event::OwnerChanged { collection, new_owner: target }.into()); + } + + force_collection_config { + let (collection, caller, _) = create_collection::(); + let origin = T::ForceOrigin::successful_origin(); + let call = Call::::force_collection_config { + collection, + config: make_collection_config::(CollectionSetting::DepositRequired.into()), + }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_last_event::(Event::CollectionConfigChanged { collection }.into()); + } + + lock_item_properties { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let lock_metadata = true; + let lock_attributes = true; + }: _(SystemOrigin::Signed(caller), collection, item, lock_metadata, lock_attributes) + verify { + assert_last_event::(Event::ItemPropertiesLocked { collection, item, lock_metadata, lock_attributes }.into()); + } + + set_attribute { + let key: BoundedVec<_, _> = vec![0u8; T::KeyLimit::get() as usize].try_into().unwrap(); + let value: BoundedVec<_, _> = vec![0u8; T::ValueLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + }: _(SystemOrigin::Signed(caller), collection, Some(item), AttributeNamespace::CollectionOwner, key.clone(), value.clone()) + verify { + assert_last_event::( + Event::AttributeSet { + collection, + maybe_item: Some(item), + namespace: AttributeNamespace::CollectionOwner, + key, + value, + } + .into(), + ); + } + + force_set_attribute { + let key: BoundedVec<_, _> = vec![0u8; T::KeyLimit::get() as usize].try_into().unwrap(); + let value: BoundedVec<_, _> = vec![0u8; T::ValueLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + }: _(SystemOrigin::Root, Some(caller), collection, Some(item), AttributeNamespace::CollectionOwner, key.clone(), value.clone()) + verify { + assert_last_event::( + Event::AttributeSet { + collection, + maybe_item: Some(item), + namespace: AttributeNamespace::CollectionOwner, + key, + value, + } + .into(), + ); + } + + clear_attribute { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + add_item_metadata::(item); + let (key, ..) = add_item_attribute::(item); + }: _(SystemOrigin::Signed(caller), collection, Some(item), AttributeNamespace::CollectionOwner, key.clone()) + verify { + assert_last_event::( + Event::AttributeCleared { + collection, + maybe_item: Some(item), + namespace: AttributeNamespace::CollectionOwner, + key, + }.into(), + ); + } + + approve_item_attributes { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller), collection, item, target_lookup) + verify { + assert_last_event::( + Event::ItemAttributesApprovalAdded { + collection, + item, + delegate: target, + } + .into(), + ); + } + + cancel_item_attributes_approval { + let n in 0 .. 
1_000; + + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + Nfts::::approve_item_attributes( + SystemOrigin::Signed(caller.clone()).into(), + collection, + item, + target_lookup.clone(), + )?; + T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); + let value: BoundedVec<_, _> = vec![0u8; T::ValueLimit::get() as usize].try_into().unwrap(); + for i in 0..n { + let mut key = vec![0u8; T::KeyLimit::get() as usize]; + let mut s = Vec::from((i as u16).to_be_bytes()); + key.truncate(s.len()); + key.append(&mut s); + + Nfts::::set_attribute( + SystemOrigin::Signed(target.clone()).into(), + T::Helper::collection(0), + Some(item), + AttributeNamespace::Account(target.clone()), + key.try_into().unwrap(), + value.clone(), + )?; + } + let witness = CancelAttributesApprovalWitness { account_attributes: n }; + }: _(SystemOrigin::Signed(caller), collection, item, target_lookup, witness) + verify { + assert_last_event::( + Event::ItemAttributesApprovalRemoved { + collection, + item, + delegate: target, + } + .into(), + ); + } + + set_metadata { + let data: BoundedVec<_, _> = vec![0u8; T::StringLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + }: _(SystemOrigin::Signed(caller), collection, item, data.clone()) + verify { + assert_last_event::(Event::ItemMetadataSet { collection, item, data }.into()); + } + + clear_metadata { + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + add_item_metadata::(item); + }: _(SystemOrigin::Signed(caller), collection, item) + verify { + assert_last_event::(Event::ItemMetadataCleared { collection, item }.into()); + } + + set_collection_metadata { + let data: BoundedVec<_, _> = vec![0u8; T::StringLimit::get() as usize].try_into().unwrap(); + + let (collection, caller, _) = create_collection::(); + }: _(SystemOrigin::Signed(caller), collection, data.clone()) + verify { + assert_last_event::(Event::CollectionMetadataSet { collection, data }.into()); + } + + clear_collection_metadata { + let (collection, caller, _) = create_collection::(); + add_collection_metadata::(); + }: _(SystemOrigin::Signed(caller), collection) + verify { + assert_last_event::(Event::CollectionMetadataCleared { collection }.into()); + } + + approve_transfer { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let deadline = T::BlockNumber::max_value(); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) + verify { + assert_last_event::(Event::TransferApproved { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); + } + + cancel_approval { + let (collection, caller, _) = create_collection::(); + let (item, ..) 
= mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let origin = SystemOrigin::Signed(caller.clone()).into(); + let deadline = T::BlockNumber::max_value(); + Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) + verify { + assert_last_event::(Event::ApprovalCancelled { collection, item, owner: caller, delegate }.into()); + } + + clear_all_transfer_approvals { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let origin = SystemOrigin::Signed(caller.clone()).into(); + let deadline = T::BlockNumber::max_value(); + Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item) + verify { + assert_last_event::(Event::AllApprovalsCancelled {collection, item, owner: caller}.into()); + } + + set_accept_ownership { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let collection = T::Helper::collection(0); + }: _(SystemOrigin::Signed(caller.clone()), Some(collection)) + verify { + assert_last_event::(Event::OwnershipAcceptanceChanged { + who: caller, + maybe_collection: Some(collection), + }.into()); + } + + set_collection_max_supply { + let (collection, caller, _) = create_collection::(); + }: _(SystemOrigin::Signed(caller.clone()), collection, u32::MAX) + verify { + assert_last_event::(Event::CollectionMaxSupplySet { + collection, + max_supply: u32::MAX, + }.into()); + } + + update_mint_settings { + let (collection, caller, _) = create_collection::(); + let mint_settings = MintSettings { + mint_type: 
MintType::HolderOf(T::Helper::collection(0)), + start_block: Some(One::one()), + end_block: Some(One::one()), + price: Some(ItemPrice::::from(1u32)), + default_item_settings: ItemSettings::all_enabled(), + }; + }: _(SystemOrigin::Signed(caller.clone()), collection, mint_settings) + verify { + assert_last_event::(Event::CollectionMintSettingsUpdated { collection }.into()); + } + + set_price { + let (collection, caller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let price = ItemPrice::::from(100u32); + }: _(SystemOrigin::Signed(caller.clone()), collection, item, Some(price), Some(delegate_lookup)) + verify { + assert_last_event::(Event::ItemPriceSet { + collection, + item, + price, + whitelisted_buyer: Some(delegate), + }.into()); + } + + buy_item { + let (collection, seller, _) = create_collection::(); + let (item, ..) = mint_item::(0); + let buyer: T::AccountId = account("buyer", 0, SEED); + let buyer_lookup = T::Lookup::unlookup(buyer.clone()); + let price = ItemPrice::::from(0u32); + let origin = SystemOrigin::Signed(seller.clone()).into(); + Nfts::::set_price(origin, collection, item, Some(price.clone()), Some(buyer_lookup))?; + T::Currency::make_free_balance_be(&buyer, DepositBalanceOf::::max_value()); + }: _(SystemOrigin::Signed(buyer.clone()), collection, item, price.clone()) + verify { + assert_last_event::(Event::ItemBought { + collection, + item, + price, + seller, + buyer, + }.into()); + } + + pay_tips { + let n in 0 .. 
T::MaxTips::get() as u32; + let amount = BalanceOf::::from(100u32); + let caller: T::AccountId = whitelisted_caller(); + let collection = T::Helper::collection(0); + let item = T::Helper::item(0); + let tips: BoundedVec<_, _> = vec![ + ItemTip + { collection, item, receiver: caller.clone(), amount }; n as usize + ].try_into().unwrap(); + }: _(SystemOrigin::Signed(caller.clone()), tips) + verify { + if !n.is_zero() { + assert_last_event::(Event::TipSent { + collection, + item, + sender: caller.clone(), + receiver: caller.clone(), + amount, + }.into()); + } + } + + create_swap { + let (collection, caller, _) = create_collection::(); + let (item1, ..) = mint_item::(0); + let (item2, ..) = mint_item::(1); + let price = ItemPrice::::from(100u32); + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + let duration = T::MaxDeadlineDuration::get(); + frame_system::Pallet::::set_block_number(One::one()); + }: _(SystemOrigin::Signed(caller.clone()), collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration) + verify { + let current_block = frame_system::Pallet::::block_number(); + assert_last_event::(Event::SwapCreated { + offered_collection: collection, + offered_item: item1, + desired_collection: collection, + desired_item: Some(item2), + price: Some(price_with_direction), + deadline: current_block.saturating_add(duration), + }.into()); + } + + cancel_swap { + let (collection, caller, _) = create_collection::(); + let (item1, ..) = mint_item::(0); + let (item2, ..) 
= mint_item::(1); + let price = ItemPrice::::from(100u32); + let origin = SystemOrigin::Signed(caller.clone()).into(); + let duration = T::MaxDeadlineDuration::get(); + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + frame_system::Pallet::::set_block_number(One::one()); + Nfts::::create_swap(origin, collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration)?; + }: _(SystemOrigin::Signed(caller.clone()), collection, item1) + verify { + assert_last_event::(Event::SwapCancelled { + offered_collection: collection, + offered_item: item1, + desired_collection: collection, + desired_item: Some(item2), + price: Some(price_with_direction), + deadline: duration.saturating_add(One::one()), + }.into()); + } + + claim_swap { + let (collection, caller, _) = create_collection::(); + let (item1, ..) = mint_item::(0); + let (item2, ..) = mint_item::(1); + let price = ItemPrice::::from(0u32); + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + let duration = T::MaxDeadlineDuration::get(); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + let origin = SystemOrigin::Signed(caller.clone()); + frame_system::Pallet::::set_block_number(One::one()); + Nfts::::transfer(origin.clone().into(), collection, item2, target_lookup)?; + Nfts::::create_swap( + origin.clone().into(), + collection, + item1, + collection, + Some(item2), + Some(price_with_direction.clone()), + duration, + )?; + }: _(SystemOrigin::Signed(target.clone()), collection, item2, collection, item1, Some(price_with_direction.clone())) + verify { + let current_block = frame_system::Pallet::::block_number(); + assert_last_event::(Event::SwapClaimed { + sent_collection: 
collection, + sent_item: item2, + sent_item_owner: target, + received_collection: collection, + received_item: item1, + received_item_owner: caller, + price: Some(price_with_direction), + deadline: duration.saturating_add(One::one()), + }.into()); + } + + impl_benchmark_test_suite!(Nfts, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/nfts/src/common_functions.rs b/frame/nfts/src/common_functions.rs new file mode 100644 index 0000000000000..9c0faeb6b7c77 --- /dev/null +++ b/frame/nfts/src/common_functions.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various pieces of common functionality. + +use super::*; + +impl, I: 'static> Pallet { + /// Get the owner of the item, if the item exists. + pub fn owner(collection: T::CollectionId, item: T::ItemId) -> Option { + Item::::get(collection, item).map(|i| i.owner) + } + + /// Get the owner of the collection, if the collection exists. 
+ pub fn collection_owner(collection: T::CollectionId) -> Option { + Collection::::get(collection).map(|i| i.owner) + } + + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn set_next_id(id: T::CollectionId) { + NextCollectionId::::set(Some(id)); + } + + #[cfg(test)] + pub fn get_next_id() -> T::CollectionId { + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()) + } +} diff --git a/frame/nfts/src/features/approvals.rs b/frame/nfts/src/features/approvals.rs new file mode 100644 index 0000000000000..cb5279fd949db --- /dev/null +++ b/frame/nfts/src/features/approvals.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_approve_transfer( + maybe_check_origin: Option, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + maybe_deadline: Option<::BlockNumber>, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Approvals), + Error::::MethodDisabled + ); + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNonTransferable + ); + + if let Some(check_origin) = maybe_check_origin { + let is_admin = Self::has_role(&collection, &check_origin, CollectionRole::Admin); + let permitted = is_admin || check_origin == details.owner; + ensure!(permitted, Error::::NoPermission); + } + + let now = frame_system::Pallet::::block_number(); + let deadline = maybe_deadline.map(|d| d.saturating_add(now)); + + details + .approvals + .try_insert(delegate.clone(), deadline) + .map_err(|_| Error::::ReachedApprovalLimit)?; + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::TransferApproved { + collection, + item, + owner: details.owner, + delegate, + deadline, + }); + + Ok(()) + } + + pub(crate) fn do_cancel_approval( + maybe_check_origin: Option, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + ) -> DispatchResult { + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + + let maybe_deadline = details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; + + let is_past_deadline = if let Some(deadline) = maybe_deadline { + let now = frame_system::Pallet::::block_number(); + now > *deadline + } else { + false + }; + + if !is_past_deadline { + if let Some(check_origin) = maybe_check_origin { + let is_admin = Self::has_role(&collection, &check_origin, 
CollectionRole::Admin); + let permitted = is_admin || check_origin == details.owner; + ensure!(permitted, Error::::NoPermission); + } + } + + details.approvals.remove(&delegate); + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::ApprovalCancelled { + collection, + item, + owner: details.owner, + delegate, + }); + + Ok(()) + } + + pub(crate) fn do_clear_all_transfer_approvals( + maybe_check_origin: Option, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownCollection)?; + + if let Some(check_origin) = maybe_check_origin { + let is_admin = Self::has_role(&collection, &check_origin, CollectionRole::Admin); + let permitted = is_admin || check_origin == details.owner; + ensure!(permitted, Error::::NoPermission); + } + + details.approvals.clear(); + Item::::insert(&collection, &item, &details); + + Self::deposit_event(Event::AllApprovalsCancelled { + collection, + item, + owner: details.owner, + }); + + Ok(()) + } +} diff --git a/frame/nfts/src/features/atomic_swap.rs b/frame/nfts/src/features/atomic_swap.rs new file mode 100644 index 0000000000000..bacaccdaedcbf --- /dev/null +++ b/frame/nfts/src/features/atomic_swap.rs @@ -0,0 +1,184 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::{ + pallet_prelude::*, + traits::{Currency, ExistenceRequirement::KeepAlive}, +}; + +impl, I: 'static> Pallet { + pub(crate) fn do_create_swap( + caller: T::AccountId, + offered_collection_id: T::CollectionId, + offered_item_id: T::ItemId, + desired_collection_id: T::CollectionId, + maybe_desired_item_id: Option, + maybe_price: Option>>, + duration: ::BlockNumber, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Swaps), + Error::::MethodDisabled + ); + ensure!(duration <= T::MaxDeadlineDuration::get(), Error::::WrongDuration); + + let item = Item::::get(&offered_collection_id, &offered_item_id) + .ok_or(Error::::UnknownItem)?; + ensure!(item.owner == caller, Error::::NoPermission); + + match maybe_desired_item_id { + Some(desired_item_id) => ensure!( + Item::::contains_key(&desired_collection_id, &desired_item_id), + Error::::UnknownItem + ), + None => ensure!( + Collection::::contains_key(&desired_collection_id), + Error::::UnknownCollection + ), + }; + + let now = frame_system::Pallet::::block_number(); + let deadline = duration.saturating_add(now); + + PendingSwapOf::::insert( + &offered_collection_id, + &offered_item_id, + PendingSwap { + desired_collection: desired_collection_id, + desired_item: maybe_desired_item_id, + price: maybe_price.clone(), + deadline, + }, + ); + + Self::deposit_event(Event::SwapCreated { + offered_collection: offered_collection_id, + offered_item: offered_item_id, + desired_collection: desired_collection_id, + desired_item: maybe_desired_item_id, + price: maybe_price, + deadline, + }); + + Ok(()) + } + + pub(crate) fn do_cancel_swap( + caller: T::AccountId, + offered_collection_id: T::CollectionId, + offered_item_id: T::ItemId, + ) -> DispatchResult { + let swap = PendingSwapOf::::get(&offered_collection_id, &offered_item_id) + .ok_or(Error::::UnknownSwap)?; + + let now = frame_system::Pallet::::block_number(); + if swap.deadline > now { + let item = 
Item::::get(&offered_collection_id, &offered_item_id) + .ok_or(Error::::UnknownItem)?; + ensure!(item.owner == caller, Error::::NoPermission); + } + + PendingSwapOf::::remove(&offered_collection_id, &offered_item_id); + + Self::deposit_event(Event::SwapCancelled { + offered_collection: offered_collection_id, + offered_item: offered_item_id, + desired_collection: swap.desired_collection, + desired_item: swap.desired_item, + price: swap.price, + deadline: swap.deadline, + }); + + Ok(()) + } + + pub(crate) fn do_claim_swap( + caller: T::AccountId, + send_collection_id: T::CollectionId, + send_item_id: T::ItemId, + receive_collection_id: T::CollectionId, + receive_item_id: T::ItemId, + witness_price: Option>>, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Swaps), + Error::::MethodDisabled + ); + + let send_item = Item::::get(&send_collection_id, &send_item_id) + .ok_or(Error::::UnknownItem)?; + let receive_item = Item::::get(&receive_collection_id, &receive_item_id) + .ok_or(Error::::UnknownItem)?; + let swap = PendingSwapOf::::get(&receive_collection_id, &receive_item_id) + .ok_or(Error::::UnknownSwap)?; + + ensure!(send_item.owner == caller, Error::::NoPermission); + ensure!( + swap.desired_collection == send_collection_id && swap.price == witness_price, + Error::::UnknownSwap + ); + + if let Some(desired_item) = swap.desired_item { + ensure!(desired_item == send_item_id, Error::::UnknownSwap); + } + + let now = frame_system::Pallet::::block_number(); + ensure!(now <= swap.deadline, Error::::DeadlineExpired); + + if let Some(ref price) = swap.price { + match price.direction { + PriceDirection::Send => T::Currency::transfer( + &receive_item.owner, + &send_item.owner, + price.amount, + KeepAlive, + )?, + PriceDirection::Receive => T::Currency::transfer( + &send_item.owner, + &receive_item.owner, + price.amount, + KeepAlive, + )?, + }; + } + + // This also removes the swap. 
+ Self::do_transfer(send_collection_id, send_item_id, receive_item.owner.clone(), |_, _| { + Ok(()) + })?; + Self::do_transfer( + receive_collection_id, + receive_item_id, + send_item.owner.clone(), + |_, _| Ok(()), + )?; + + Self::deposit_event(Event::SwapClaimed { + sent_collection: send_collection_id, + sent_item: send_item_id, + sent_item_owner: send_item.owner, + received_collection: receive_collection_id, + received_item: receive_item_id, + received_item_owner: receive_item.owner, + price: swap.price, + deadline: swap.deadline, + }); + + Ok(()) + } +} diff --git a/frame/nfts/src/features/attributes.rs b/frame/nfts/src/features/attributes.rs new file mode 100644 index 0000000000000..da663d39a4ef5 --- /dev/null +++ b/frame/nfts/src/features/attributes.rs @@ -0,0 +1,323 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_set_attribute( + origin: T::AccountId, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + value: BoundedVec, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Attributes), + Error::::MethodDisabled + ); + + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + ensure!( + Self::is_valid_namespace( + &origin, + &namespace, + &collection, + &collection_details.owner, + &maybe_item, + )?, + Error::::NoPermission + ); + + let collection_config = Self::get_collection_config(&collection)?; + // for the `CollectionOwner` namespace we need to check if the collection/item is not locked + match namespace { + AttributeNamespace::CollectionOwner => match maybe_item { + None => { + ensure!( + collection_config.is_setting_enabled(CollectionSetting::UnlockedAttributes), + Error::::LockedCollectionAttributes + ) + }, + Some(item) => { + let maybe_is_locked = Self::get_item_config(&collection, &item) + .map(|c| c.has_disabled_setting(ItemSetting::UnlockedAttributes))?; + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, + }, + _ => (), + } + + let attribute = Attribute::::get((collection, maybe_item, &namespace, &key)); + if attribute.is_none() { + collection_details.attributes.saturating_inc(); + } + + let old_deposit = + attribute.map_or(AttributeDeposit { account: None, amount: Zero::zero() }, |m| m.1); + + let mut deposit = Zero::zero(); + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) || + namespace != AttributeNamespace::CollectionOwner + { + deposit = T::DepositPerByte::get() + .saturating_mul(((key.len() + value.len()) as u32).into()) + .saturating_add(T::AttributeDepositBase::get()); + } + + // NOTE: when we transfer an item, we don't move attributes in the ItemOwner namespace. 
+ // When the new owner updates the same attribute, we will update the depositor record + // and return the deposit to the previous owner. + if old_deposit.account.is_some() && old_deposit.account != Some(origin.clone()) { + T::Currency::unreserve(&old_deposit.account.unwrap(), old_deposit.amount); + T::Currency::reserve(&origin, deposit)?; + } else if deposit > old_deposit.amount { + T::Currency::reserve(&origin, deposit - old_deposit.amount)?; + } else if deposit < old_deposit.amount { + T::Currency::unreserve(&origin, old_deposit.amount - deposit); + } + + // NOTE: we don't track the depositor in the CollectionOwner namespace as it's always a + // collection's owner. This simplifies the collection's transfer to another owner. + let deposit_owner = match namespace { + AttributeNamespace::CollectionOwner => { + collection_details.owner_deposit.saturating_accrue(deposit); + collection_details.owner_deposit.saturating_reduce(old_deposit.amount); + None + }, + _ => Some(origin), + }; + + Attribute::::insert( + (&collection, maybe_item, &namespace, &key), + (&value, AttributeDeposit { account: deposit_owner, amount: deposit }), + ); + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value, namespace }); + Ok(()) + } + + pub(crate) fn do_force_set_attribute( + set_as: Option, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + value: BoundedVec, + ) -> DispatchResult { + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + let attribute = Attribute::::get((collection, maybe_item, &namespace, &key)); + if let Some((_, deposit)) = attribute { + if deposit.account != set_as && deposit.amount != Zero::zero() { + if let Some(deposit_account) = deposit.account { + T::Currency::unreserve(&deposit_account, deposit.amount); + } + } + } else { + collection_details.attributes.saturating_inc(); + } + 
+ Attribute::::insert( + (&collection, maybe_item, &namespace, &key), + (&value, AttributeDeposit { account: set_as, amount: Zero::zero() }), + ); + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeSet { collection, maybe_item, key, value, namespace }); + Ok(()) + } + + pub(crate) fn do_clear_attribute( + maybe_check_owner: Option, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + ) -> DispatchResult { + if let Some((_, deposit)) = + Attribute::::take((collection, maybe_item, &namespace, &key)) + { + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + if let Some(check_owner) = &maybe_check_owner { + if deposit.account != maybe_check_owner { + ensure!( + Self::is_valid_namespace( + &check_owner, + &namespace, + &collection, + &collection_details.owner, + &maybe_item, + )?, + Error::::NoPermission + ); + } + + // can't clear `CollectionOwner` type attributes if the collection/item is locked + match namespace { + AttributeNamespace::CollectionOwner => match maybe_item { + None => { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config + .is_setting_enabled(CollectionSetting::UnlockedAttributes), + Error::::LockedCollectionAttributes + ) + }, + Some(item) => { + // NOTE: if the item was previously burned, the ItemConfigOf record + // might not exist. In that case, we allow to clear the attribute. 
+ let maybe_is_locked = Self::get_item_config(&collection, &item) + .map_or(false, |c| { + c.has_disabled_setting(ItemSetting::UnlockedAttributes) + }); + ensure!(!maybe_is_locked, Error::::LockedItemAttributes); + }, + }, + _ => (), + }; + } + + collection_details.attributes.saturating_dec(); + match namespace { + AttributeNamespace::CollectionOwner => { + collection_details.owner_deposit.saturating_reduce(deposit.amount); + T::Currency::unreserve(&collection_details.owner, deposit.amount); + }, + _ => (), + }; + if let Some(deposit_account) = deposit.account { + T::Currency::unreserve(&deposit_account, deposit.amount); + } + Collection::::insert(collection, &collection_details); + Self::deposit_event(Event::AttributeCleared { collection, maybe_item, key, namespace }); + } + Ok(()) + } + + pub(crate) fn do_approve_item_attributes( + check_origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Attributes), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(check_origin == details.owner, Error::::NoPermission); + + ItemAttributesApprovalsOf::::try_mutate(collection, item, |approvals| { + approvals + .try_insert(delegate.clone()) + .map_err(|_| Error::::ReachedApprovalLimit)?; + + Self::deposit_event(Event::ItemAttributesApprovalAdded { collection, item, delegate }); + Ok(()) + }) + } + + pub(crate) fn do_cancel_item_attributes_approval( + check_origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + witness: CancelAttributesApprovalWitness, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Attributes), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(check_origin == details.owner, Error::::NoPermission); + + 
ItemAttributesApprovalsOf::::try_mutate(collection, item, |approvals| { + approvals.remove(&delegate); + + let mut attributes: u32 = 0; + let mut deposited: DepositBalanceOf = Zero::zero(); + for (_, (_, deposit)) in Attribute::::drain_prefix(( + &collection, + Some(item), + AttributeNamespace::Account(delegate.clone()), + )) { + attributes.saturating_inc(); + deposited = deposited.saturating_add(deposit.amount); + } + ensure!(attributes <= witness.account_attributes, Error::::BadWitness); + + if !deposited.is_zero() { + T::Currency::unreserve(&delegate, deposited); + } + + Self::deposit_event(Event::ItemAttributesApprovalRemoved { + collection, + item, + delegate, + }); + Ok(()) + }) + } + + fn is_valid_namespace( + origin: &T::AccountId, + namespace: &AttributeNamespace, + collection: &T::CollectionId, + collection_owner: &T::AccountId, + maybe_item: &Option, + ) -> Result { + let mut result = false; + match namespace { + AttributeNamespace::CollectionOwner => result = origin == collection_owner, + AttributeNamespace::ItemOwner => + if let Some(item) = maybe_item { + let item_details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + result = origin == &item_details.owner + }, + AttributeNamespace::Account(account_id) => + if let Some(item) = maybe_item { + let approvals = ItemAttributesApprovalsOf::::get(&collection, &item); + result = account_id == origin && approvals.contains(&origin) + }, + _ => (), + }; + Ok(result) + } + + /// A helper method to construct attribute's key. + pub fn construct_attribute_key( + key: Vec, + ) -> Result, DispatchError> { + Ok(BoundedVec::try_from(key).map_err(|_| Error::::IncorrectData)?) + } + + /// A helper method to construct attribute's value. + pub fn construct_attribute_value( + value: Vec, + ) -> Result, DispatchError> { + Ok(BoundedVec::try_from(value).map_err(|_| Error::::IncorrectData)?) 
+ } +} diff --git a/frame/nfts/src/features/buy_sell.rs b/frame/nfts/src/features/buy_sell.rs new file mode 100644 index 0000000000000..8ba5171f8d822 --- /dev/null +++ b/frame/nfts/src/features/buy_sell.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use frame_support::{ + pallet_prelude::*, + traits::{Currency, ExistenceRequirement, ExistenceRequirement::KeepAlive}, +}; + +impl, I: 'static> Pallet { + pub(crate) fn do_pay_tips( + sender: T::AccountId, + tips: BoundedVec, T::MaxTips>, + ) -> DispatchResult { + for tip in tips { + let ItemTip { collection, item, receiver, amount } = tip; + T::Currency::transfer(&sender, &receiver, amount, KeepAlive)?; + Self::deposit_event(Event::TipSent { + collection, + item, + sender: sender.clone(), + receiver, + amount, + }); + } + Ok(()) + } + + pub(crate) fn do_set_price( + collection: T::CollectionId, + item: T::ItemId, + sender: T::AccountId, + price: Option>, + whitelisted_buyer: Option, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Trading), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner == sender, Error::::NoPermission); + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + 
collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNonTransferable + ); + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + item_config.is_setting_enabled(ItemSetting::Transferable), + Error::::ItemLocked + ); + + if let Some(ref price) = price { + ItemPriceOf::::insert(&collection, &item, (price, whitelisted_buyer.clone())); + Self::deposit_event(Event::ItemPriceSet { + collection, + item, + price: *price, + whitelisted_buyer, + }); + } else { + ItemPriceOf::::remove(&collection, &item); + Self::deposit_event(Event::ItemPriceRemoved { collection, item }); + } + + Ok(()) + } + + pub(crate) fn do_buy_item( + collection: T::CollectionId, + item: T::ItemId, + buyer: T::AccountId, + bid_price: ItemPrice, + ) -> DispatchResult { + ensure!( + Self::is_pallet_feature_enabled(PalletFeature::Trading), + Error::::MethodDisabled + ); + + let details = Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + ensure!(details.owner != buyer, Error::::NoPermission); + + let price_info = + ItemPriceOf::::get(&collection, &item).ok_or(Error::::NotForSale)?; + + ensure!(bid_price >= price_info.0, Error::::BidTooLow); + + if let Some(only_buyer) = price_info.1 { + ensure!(only_buyer == buyer, Error::::NoPermission); + } + + T::Currency::transfer( + &buyer, + &details.owner, + price_info.0, + ExistenceRequirement::KeepAlive, + )?; + + let old_owner = details.owner.clone(); + + Self::do_transfer(collection, item, buyer.clone(), |_, _| Ok(()))?; + + Self::deposit_event(Event::ItemBought { + collection, + item, + price: price_info.0, + seller: old_owner, + buyer, + }); + + Ok(()) + } +} diff --git a/frame/nfts/src/features/create_delete_collection.rs b/frame/nfts/src/features/create_delete_collection.rs new file mode 100644 index 0000000000000..86625bf49efb2 --- /dev/null +++ b/frame/nfts/src/features/create_delete_collection.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub fn do_create_collection( + collection: T::CollectionId, + owner: T::AccountId, + admin: T::AccountId, + config: CollectionConfigFor, + deposit: DepositBalanceOf, + event: Event, + ) -> DispatchResult { + ensure!(!Collection::::contains_key(collection), Error::::CollectionIdInUse); + + T::Currency::reserve(&owner, deposit)?; + + Collection::::insert( + collection, + CollectionDetails { + owner: owner.clone(), + owner_deposit: deposit, + items: 0, + item_metadatas: 0, + attributes: 0, + }, + ); + CollectionRoleOf::::insert( + collection, + admin, + CollectionRoles( + CollectionRole::Admin | CollectionRole::Freezer | CollectionRole::Issuer, + ), + ); + + let next_id = collection.increment(); + + CollectionConfigOf::::insert(&collection, config); + CollectionAccount::::insert(&owner, &collection, ()); + NextCollectionId::::set(Some(next_id)); + + Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); + Self::deposit_event(event); + Ok(()) + } + + pub fn do_destroy_collection( + collection: T::CollectionId, + witness: DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Collection::::try_mutate_exists(collection, |maybe_details| { + let collection_details = + 
maybe_details.take().ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(collection_details.owner == check_owner, Error::::NoPermission); + } + ensure!(collection_details.items == witness.items, Error::::BadWitness); + ensure!( + collection_details.item_metadatas == witness.item_metadatas, + Error::::BadWitness + ); + ensure!(collection_details.attributes == witness.attributes, Error::::BadWitness); + + for (item, details) in Item::::drain_prefix(&collection) { + Account::::remove((&details.owner, &collection, &item)); + T::Currency::unreserve(&details.deposit.account, details.deposit.amount); + } + #[allow(deprecated)] + ItemMetadataOf::::remove_prefix(&collection, None); + #[allow(deprecated)] + ItemPriceOf::::remove_prefix(&collection, None); + #[allow(deprecated)] + PendingSwapOf::::remove_prefix(&collection, None); + CollectionMetadataOf::::remove(&collection); + Self::clear_roles(&collection)?; + + for (_, (_, deposit)) in Attribute::::drain_prefix((&collection,)) { + if !deposit.amount.is_zero() { + if let Some(account) = deposit.account { + T::Currency::unreserve(&account, deposit.amount); + } + } + } + + CollectionAccount::::remove(&collection_details.owner, &collection); + T::Currency::unreserve(&collection_details.owner, collection_details.owner_deposit); + CollectionConfigOf::::remove(&collection); + let _ = ItemConfigOf::::clear_prefix(&collection, witness.items, None); + let _ = + ItemAttributesApprovalsOf::::clear_prefix(&collection, witness.items, None); + + Self::deposit_event(Event::Destroyed { collection }); + + Ok(DestroyWitness { + items: collection_details.items, + item_metadatas: collection_details.item_metadatas, + attributes: collection_details.attributes, + }) + }) + } +} diff --git a/frame/nfts/src/features/create_delete_item.rs b/frame/nfts/src/features/create_delete_item.rs new file mode 100644 index 0000000000000..7fd745b2bfff8 --- /dev/null +++ b/frame/nfts/src/features/create_delete_item.rs 
@@ -0,0 +1,126 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub fn do_mint( + collection: T::CollectionId, + item: T::ItemId, + depositor: T::AccountId, + mint_to: T::AccountId, + item_config: ItemConfig, + deposit_collection_owner: bool, + with_details_and_config: impl FnOnce( + &CollectionDetailsFor, + &CollectionConfigFor, + ) -> DispatchResult, + ) -> DispatchResult { + ensure!(!Item::::contains_key(collection, item), Error::::AlreadyExists); + + Collection::::try_mutate( + &collection, + |maybe_collection_details| -> DispatchResult { + let collection_details = + maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; + + let collection_config = Self::get_collection_config(&collection)?; + with_details_and_config(collection_details, &collection_config)?; + + if let Some(max_supply) = collection_config.max_supply { + ensure!(collection_details.items < max_supply, Error::::MaxSupplyReached); + } + + collection_details.items.saturating_inc(); + + let collection_config = Self::get_collection_config(&collection)?; + let deposit_amount = match collection_config + .is_setting_enabled(CollectionSetting::DepositRequired) + { + true => T::ItemDeposit::get(), + false => Zero::zero(), + }; + let deposit_account = match 
deposit_collection_owner { + true => collection_details.owner.clone(), + false => depositor, + }; + + let item_owner = mint_to.clone(); + Account::::insert((&item_owner, &collection, &item), ()); + + if let Ok(existing_config) = ItemConfigOf::::try_get(&collection, &item) { + ensure!(existing_config == item_config, Error::::InconsistentItemConfig); + } else { + ItemConfigOf::::insert(&collection, &item, item_config); + } + + T::Currency::reserve(&deposit_account, deposit_amount)?; + + let deposit = ItemDeposit { account: deposit_account, amount: deposit_amount }; + let details = ItemDetails { + owner: item_owner, + approvals: ApprovalsOf::::default(), + deposit, + }; + Item::::insert(&collection, &item, details); + Ok(()) + }, + )?; + + Self::deposit_event(Event::Issued { collection, item, owner: mint_to }); + Ok(()) + } + + pub fn do_burn( + collection: T::CollectionId, + item: T::ItemId, + with_details: impl FnOnce(&ItemDetailsFor) -> DispatchResult, + ) -> DispatchResult { + let owner = Collection::::try_mutate( + &collection, + |maybe_collection_details| -> Result { + let collection_details = + maybe_collection_details.as_mut().ok_or(Error::::UnknownCollection)?; + let details = Item::::get(&collection, &item) + .ok_or(Error::::UnknownCollection)?; + with_details(&details)?; + + // Return the deposit. + T::Currency::unreserve(&details.deposit.account, details.deposit.amount); + collection_details.items.saturating_dec(); + Ok(details.owner) + }, + )?; + + Item::::remove(&collection, &item); + Account::::remove((&owner, &collection, &item)); + ItemPriceOf::::remove(&collection, &item); + PendingSwapOf::::remove(&collection, &item); + ItemAttributesApprovalsOf::::remove(&collection, &item); + + // NOTE: if item's settings are not empty (e.g. 
item's metadata is locked) + // then we keep the record and don't remove it + let config = Self::get_item_config(&collection, &item)?; + if !config.has_disabled_settings() { + ItemConfigOf::::remove(&collection, &item); + } + + Self::deposit_event(Event::Burned { collection, item, owner }); + Ok(()) + } +} diff --git a/frame/nfts/src/features/lock.rs b/frame/nfts/src/features/lock.rs new file mode 100644 index 0000000000000..e96a30dfd2c7c --- /dev/null +++ b/frame/nfts/src/features/lock.rs @@ -0,0 +1,120 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_lock_collection( + origin: T::AccountId, + collection: T::CollectionId, + lock_settings: CollectionSettings, + ) -> DispatchResult { + ensure!( + Self::has_role(&collection, &origin, CollectionRole::Freezer), + Error::::NoPermission + ); + ensure!( + !lock_settings.is_disabled(CollectionSetting::DepositRequired), + Error::::WrongSetting + ); + CollectionConfigOf::::try_mutate(collection, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; + + for setting in lock_settings.get_disabled() { + config.disable_setting(setting); + } + + Self::deposit_event(Event::::CollectionLocked { collection }); + Ok(()) + }) + } + + pub(crate) fn do_lock_item_transfer( + origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + ensure!( + Self::has_role(&collection, &origin, CollectionRole::Freezer), + Error::::NoPermission + ); + + let mut config = Self::get_item_config(&collection, &item)?; + if !config.has_disabled_setting(ItemSetting::Transferable) { + config.disable_setting(ItemSetting::Transferable); + } + ItemConfigOf::::insert(&collection, &item, config); + + Self::deposit_event(Event::::ItemTransferLocked { collection, item }); + Ok(()) + } + + pub(crate) fn do_unlock_item_transfer( + origin: T::AccountId, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + ensure!( + Self::has_role(&collection, &origin, CollectionRole::Freezer), + Error::::NoPermission + ); + + let mut config = Self::get_item_config(&collection, &item)?; + if config.has_disabled_setting(ItemSetting::Transferable) { + config.enable_setting(ItemSetting::Transferable); + } + ItemConfigOf::::insert(&collection, &item, config); + + Self::deposit_event(Event::::ItemTransferUnlocked { collection, item }); + Ok(()) + } + + pub(crate) fn do_lock_item_properties( + maybe_check_owner: Option, + collection: T::CollectionId, + 
item: T::ItemId, + lock_metadata: bool, + lock_attributes: bool, + ) -> DispatchResult { + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + ItemConfigOf::::try_mutate(collection, item, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::UnknownItem)?; + + if lock_metadata { + config.disable_setting(ItemSetting::UnlockedMetadata); + } + if lock_attributes { + config.disable_setting(ItemSetting::UnlockedAttributes); + } + + Self::deposit_event(Event::::ItemPropertiesLocked { + collection, + item, + lock_metadata, + lock_attributes, + }); + Ok(()) + }) + } +} diff --git a/frame/nfts/src/features/metadata.rs b/frame/nfts/src/features/metadata.rs new file mode 100644 index 0000000000000..942f377141a33 --- /dev/null +++ b/frame/nfts/src/features/metadata.rs @@ -0,0 +1,173 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_set_item_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + item: T::ItemId, + data: BoundedVec, + ) -> DispatchResult { + let mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + maybe_check_owner.is_none() || + item_config.is_setting_enabled(ItemSetting::UnlockedMetadata), + Error::::LockedItemMetadata + ); + + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + let collection_config = Self::get_collection_config(&collection)?; + + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { + if metadata.is_none() { + collection_details.item_metadatas.saturating_inc(); + } + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + collection_details.owner_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if collection_config.is_setting_enabled(CollectionSetting::DepositRequired) && + maybe_check_owner.is_some() + { + deposit = T::DepositPerByte::get() + .saturating_mul(((data.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + } + if deposit > old_deposit { + T::Currency::reserve(&collection_details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + T::Currency::unreserve(&collection_details.owner, old_deposit - deposit); + } + collection_details.owner_deposit.saturating_accrue(deposit); + + *metadata = Some(ItemMetadata { deposit, data: data.clone() }); + + Collection::::insert(&collection, &collection_details); + Self::deposit_event(Event::ItemMetadataSet { collection, item, data }); + Ok(()) + }) + } + + pub(crate) fn do_clear_item_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let 
mut collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &collection_details.owner, Error::::NoPermission); + } + + // NOTE: if the item was previously burned, the ItemConfigOf record might not exist + let is_locked = Self::get_item_config(&collection, &item) + .map_or(false, |c| c.has_disabled_setting(ItemSetting::UnlockedMetadata)); + + ensure!(maybe_check_owner.is_none() || !is_locked, Error::::LockedItemMetadata); + + ItemMetadataOf::::try_mutate_exists(collection, item, |metadata| { + if metadata.is_some() { + collection_details.item_metadatas.saturating_dec(); + } + let deposit = metadata.take().ok_or(Error::::UnknownItem)?.deposit; + T::Currency::unreserve(&collection_details.owner, deposit); + collection_details.owner_deposit.saturating_reduce(deposit); + + Collection::::insert(&collection, &collection_details); + Self::deposit_event(Event::ItemMetadataCleared { collection, item }); + Ok(()) + }) + } + + pub(crate) fn do_set_collection_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + data: BoundedVec, + ) -> DispatchResult { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + Error::::LockedCollectionMetadata + ); + + let mut details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + details.owner_deposit.saturating_reduce(old_deposit); + let mut deposit = Zero::zero(); + if maybe_check_owner.is_some() && + collection_config.is_setting_enabled(CollectionSetting::DepositRequired) + { + deposit = 
T::DepositPerByte::get() + .saturating_mul(((data.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + } + if deposit > old_deposit { + T::Currency::reserve(&details.owner, deposit - old_deposit)?; + } else if deposit < old_deposit { + T::Currency::unreserve(&details.owner, old_deposit - deposit); + } + details.owner_deposit.saturating_accrue(deposit); + + Collection::::insert(&collection, details); + + *metadata = Some(CollectionMetadata { deposit, data: data.clone() }); + + Self::deposit_event(Event::CollectionMetadataSet { collection, data }); + Ok(()) + }) + } + + pub(crate) fn do_clear_collection_metadata( + maybe_check_owner: Option, + collection: T::CollectionId, + ) -> DispatchResult { + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + maybe_check_owner.is_none() || + collection_config.is_setting_enabled(CollectionSetting::UnlockedMetadata), + Error::::LockedCollectionMetadata + ); + + CollectionMetadataOf::::try_mutate_exists(collection, |metadata| { + let deposit = metadata.take().ok_or(Error::::UnknownCollection)?.deposit; + T::Currency::unreserve(&details.owner, deposit); + Self::deposit_event(Event::CollectionMetadataCleared { collection }); + Ok(()) + }) + } +} diff --git a/frame/nfts/src/features/mod.rs b/frame/nfts/src/features/mod.rs new file mode 100644 index 0000000000000..b77ee9bf2491b --- /dev/null +++ b/frame/nfts/src/features/mod.rs @@ -0,0 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod approvals; +pub mod atomic_swap; +pub mod attributes; +pub mod buy_sell; +pub mod create_delete_collection; +pub mod create_delete_item; +pub mod lock; +pub mod metadata; +pub mod roles; +pub mod settings; +pub mod transfer; diff --git a/frame/nfts/src/features/roles.rs b/frame/nfts/src/features/roles.rs new file mode 100644 index 0000000000000..d6be9965a5e74 --- /dev/null +++ b/frame/nfts/src/features/roles.rs @@ -0,0 +1,99 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; +use sp_std::collections::btree_map::BTreeMap; + +impl, I: 'static> Pallet { + pub(crate) fn do_set_team( + maybe_check_owner: Option, + collection: T::CollectionId, + issuer: T::AccountId, + admin: T::AccountId, + freezer: T::AccountId, + ) -> DispatchResult { + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + if let Some(check_origin) = maybe_check_owner { + ensure!(check_origin == details.owner, Error::::NoPermission); + } + + // delete previous values + Self::clear_roles(&collection)?; + + let account_to_role = Self::group_roles_by_account(vec![ + (issuer.clone(), CollectionRole::Issuer), + (admin.clone(), CollectionRole::Admin), + (freezer.clone(), CollectionRole::Freezer), + ]); + for (account, roles) in account_to_role { + CollectionRoleOf::::insert(&collection, &account, roles); + } + + Self::deposit_event(Event::TeamChanged { collection, issuer, admin, freezer }); + Ok(()) + }) + } + + /// Clears all the roles in a specified collection. + /// + /// - `collection_id`: A collection to clear the roles in. + /// + /// Throws an error if some of the roles were left in storage. + /// This means the `CollectionRoles::max_roles()` needs to be adjusted. + pub(crate) fn clear_roles(collection_id: &T::CollectionId) -> Result<(), DispatchError> { + let res = CollectionRoleOf::::clear_prefix( + &collection_id, + CollectionRoles::max_roles() as u32, + None, + ); + ensure!(res.maybe_cursor.is_none(), Error::::RolesNotCleared); + Ok(()) + } + + /// Returns true if a specified account has a provided role within that collection. + /// + /// - `collection_id`: A collection to check the role in. + /// - `account_id`: An account to check the role for. + /// - `role`: A role to validate. + /// + /// Returns boolean. 
+ pub(crate) fn has_role( + collection_id: &T::CollectionId, + account_id: &T::AccountId, + role: CollectionRole, + ) -> bool { + CollectionRoleOf::::get(&collection_id, &account_id) + .map_or(false, |roles| roles.has_role(role)) + } + + /// Groups provided roles by account, given one account could have multiple roles. + /// + /// - `input`: A vector of (Account, Role) tuples. + /// + /// Returns a grouped vector. + pub fn group_roles_by_account( + input: Vec<(T::AccountId, CollectionRole)>, + ) -> Vec<(T::AccountId, CollectionRoles)> { + let mut result = BTreeMap::new(); + for (account, role) in input.into_iter() { + result.entry(account).or_insert(CollectionRoles::none()).add_role(role); + } + result.into_iter().collect() + } +} diff --git a/frame/nfts/src/features/settings.rs b/frame/nfts/src/features/settings.rs new file mode 100644 index 0000000000000..5f408ed183c35 --- /dev/null +++ b/frame/nfts/src/features/settings.rs @@ -0,0 +1,103 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub(crate) fn do_force_collection_config( + collection: T::CollectionId, + config: CollectionConfigFor, + ) -> DispatchResult { + ensure!(Collection::::contains_key(&collection), Error::::UnknownCollection); + CollectionConfigOf::::insert(&collection, config); + Self::deposit_event(Event::CollectionConfigChanged { collection }); + Ok(()) + } + + pub(crate) fn do_set_collection_max_supply( + maybe_check_owner: Option, + collection: T::CollectionId, + max_supply: u32, + ) -> DispatchResult { + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::UnlockedMaxSupply), + Error::::MaxSupplyLocked + ); + + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + ensure!(details.items <= max_supply, Error::::MaxSupplyTooSmall); + + CollectionConfigOf::::try_mutate(collection, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; + config.max_supply = Some(max_supply); + Self::deposit_event(Event::CollectionMaxSupplySet { collection, max_supply }); + Ok(()) + }) + } + + pub(crate) fn do_update_mint_settings( + maybe_check_owner: Option, + collection: T::CollectionId, + mint_settings: MintSettings< + BalanceOf, + ::BlockNumber, + T::CollectionId, + >, + ) -> DispatchResult { + let details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &details.owner, Error::::NoPermission); + } + + CollectionConfigOf::::try_mutate(collection, |maybe_config| { + let config = maybe_config.as_mut().ok_or(Error::::NoConfig)?; + config.mint_settings = mint_settings; + Self::deposit_event(Event::CollectionMintSettingsUpdated { collection }); + Ok(()) + }) + } + 
+ pub(crate) fn get_collection_config( + collection_id: &T::CollectionId, + ) -> Result, DispatchError> { + let config = + CollectionConfigOf::::get(&collection_id).ok_or(Error::::NoConfig)?; + Ok(config) + } + + pub(crate) fn get_item_config( + collection_id: &T::CollectionId, + item_id: &T::ItemId, + ) -> Result { + let config = ItemConfigOf::::get(&collection_id, &item_id) + .ok_or(Error::::UnknownItem)?; + Ok(config) + } + + pub(crate) fn is_pallet_feature_enabled(feature: PalletFeature) -> bool { + let features = T::Features::get(); + return features.is_enabled(feature) + } +} diff --git a/frame/nfts/src/features/transfer.rs b/frame/nfts/src/features/transfer.rs new file mode 100644 index 0000000000000..7d6ae3553a361 --- /dev/null +++ b/frame/nfts/src/features/transfer.rs @@ -0,0 +1,166 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use frame_support::pallet_prelude::*; + +impl, I: 'static> Pallet { + pub fn do_transfer( + collection: T::CollectionId, + item: T::ItemId, + dest: T::AccountId, + with_details: impl FnOnce( + &CollectionDetailsFor, + &mut ItemDetailsFor, + ) -> DispatchResult, + ) -> DispatchResult { + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(!T::Locker::is_locked(collection, item), Error::::ItemLocked); + + let collection_config = Self::get_collection_config(&collection)?; + ensure!( + collection_config.is_setting_enabled(CollectionSetting::TransferableItems), + Error::::ItemsNonTransferable + ); + + let item_config = Self::get_item_config(&collection, &item)?; + ensure!( + item_config.is_setting_enabled(ItemSetting::Transferable), + Error::::ItemLocked + ); + + let mut details = + Item::::get(&collection, &item).ok_or(Error::::UnknownItem)?; + with_details(&collection_details, &mut details)?; + + if details.deposit.account == details.owner { + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &dest, + details.deposit.amount, + Reserved, + )?; + } + + Account::::remove((&details.owner, &collection, &item)); + Account::::insert((&dest, &collection, &item), ()); + let origin = details.owner; + details.owner = dest; + + // The approved accounts have to be reset to None, because otherwise pre-approve attack + // would be possible, where the owner can approve his second account before making the + // transaction and then claiming the item back. 
+ details.approvals.clear(); + + Item::::insert(&collection, &item, &details); + ItemPriceOf::::remove(&collection, &item); + PendingSwapOf::::remove(&collection, &item); + + Self::deposit_event(Event::Transferred { + collection, + item, + from: origin, + to: details.owner, + }); + Ok(()) + } + + pub(crate) fn do_transfer_ownership( + origin: T::AccountId, + collection: T::CollectionId, + owner: T::AccountId, + ) -> DispatchResult { + let acceptable_collection = OwnershipAcceptance::::get(&owner); + ensure!(acceptable_collection.as_ref() == Some(&collection), Error::::Unaccepted); + + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + ensure!(origin == details.owner, Error::::NoPermission); + if details.owner == owner { + return Ok(()) + } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &owner, + details.owner_deposit, + Reserved, + )?; + CollectionAccount::::remove(&details.owner, &collection); + CollectionAccount::::insert(&owner, &collection, ()); + + details.owner = owner.clone(); + OwnershipAcceptance::::remove(&owner); + + Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); + Ok(()) + }) + } + + pub(crate) fn do_set_accept_ownership( + who: T::AccountId, + maybe_collection: Option, + ) -> DispatchResult { + let old = OwnershipAcceptance::::get(&who); + match (old.is_some(), maybe_collection.is_some()) { + (false, true) => { + frame_system::Pallet::::inc_consumers(&who)?; + }, + (true, false) => { + frame_system::Pallet::::dec_consumers(&who); + }, + _ => {}, + } + if let Some(collection) = maybe_collection.as_ref() { + OwnershipAcceptance::::insert(&who, collection); + } else { + OwnershipAcceptance::::remove(&who); + } + Self::deposit_event(Event::OwnershipAcceptanceChanged { who, maybe_collection }); + Ok(()) + } + + pub(crate) fn do_force_collection_owner( + collection: T::CollectionId, + owner: 
T::AccountId, + ) -> DispatchResult { + Collection::::try_mutate(collection, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::UnknownCollection)?; + if details.owner == owner { + return Ok(()) + } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &owner, + details.owner_deposit, + Reserved, + )?; + + CollectionAccount::::remove(&details.owner, &collection); + CollectionAccount::::insert(&owner, &collection, ()); + details.owner = owner.clone(); + + Self::deposit_event(Event::OwnerChanged { collection, new_owner: owner }); + Ok(()) + }) + } +} diff --git a/frame/nfts/src/impl_nonfungibles.rs b/frame/nfts/src/impl_nonfungibles.rs new file mode 100644 index 0000000000000..edfc29710b7da --- /dev/null +++ b/frame/nfts/src/impl_nonfungibles.rs @@ -0,0 +1,289 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for `nonfungibles` traits. 
+ +use super::*; +use frame_support::{ + ensure, + storage::KeyPrefixIterator, + traits::{tokens::nonfungibles_v2::*, Get}, + BoundedSlice, +}; +use sp_runtime::{DispatchError, DispatchResult}; +use sp_std::prelude::*; + +impl, I: 'static> Inspect<::AccountId> for Pallet { + type ItemId = T::ItemId; + type CollectionId = T::CollectionId; + + fn owner( + collection: &Self::CollectionId, + item: &Self::ItemId, + ) -> Option<::AccountId> { + Item::::get(collection, item).map(|a| a.owner) + } + + fn collection_owner(collection: &Self::CollectionId) -> Option<::AccountId> { + Collection::::get(collection).map(|a| a.owner) + } + + /// Returns the attribute value of `item` of `collection` corresponding to `key`. + /// + /// When `key` is empty, we return the item metadata value. + /// + /// By default this is `None`; no attributes are defined. + fn attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + namespace: &AttributeNamespace<::AccountId>, + key: &[u8], + ) -> Option> { + if key.is_empty() { + // We make the empty key map to the item metadata value. + ItemMetadataOf::::get(collection, item).map(|m| m.data.into()) + } else { + let key = BoundedSlice::<_, _>::try_from(key).ok()?; + Attribute::::get((collection, Some(item), namespace, key)).map(|a| a.0.into()) + } + } + + /// Returns the attribute value of `item` of `collection` corresponding to `key`. + /// + /// When `key` is empty, we return the item metadata value. + /// + /// By default this is `None`; no attributes are defined. + fn collection_attribute(collection: &Self::CollectionId, key: &[u8]) -> Option> { + if key.is_empty() { + // We make the empty key map to the item metadata value. 
+ CollectionMetadataOf::::get(collection).map(|m| m.data.into()) + } else { + let key = BoundedSlice::<_, _>::try_from(key).ok()?; + Attribute::::get(( + collection, + Option::::None, + AttributeNamespace::CollectionOwner, + key, + )) + .map(|a| a.0.into()) + } + } + + /// Returns `true` if the `item` of `collection` may be transferred. + /// + /// Default implementation is that all items are transferable. + fn can_transfer(collection: &Self::CollectionId, item: &Self::ItemId) -> bool { + match ( + CollectionConfigOf::::get(collection), + ItemConfigOf::::get(collection, item), + ) { + (Some(cc), Some(ic)) + if cc.is_setting_enabled(CollectionSetting::TransferableItems) && + ic.is_setting_enabled(ItemSetting::Transferable) => + true, + _ => false, + } + } +} + +impl, I: 'static> Create<::AccountId, CollectionConfigFor> + for Pallet +{ + /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. + fn create_collection( + who: &T::AccountId, + admin: &T::AccountId, + config: &CollectionConfigFor, + ) -> Result { + // DepositRequired can be disabled by calling the force_create() only + ensure!( + !config.has_disabled_setting(CollectionSetting::DepositRequired), + Error::::WrongSetting + ); + + let collection = + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + + Self::do_create_collection( + collection, + who.clone(), + admin.clone(), + *config, + T::CollectionDeposit::get(), + Event::Created { collection, creator: who.clone(), owner: admin.clone() }, + )?; + Ok(collection) + } +} + +impl, I: 'static> Destroy<::AccountId> for Pallet { + type DestroyWitness = DestroyWitness; + + fn get_destroy_witness(collection: &Self::CollectionId) -> Option { + Collection::::get(collection).map(|a| a.destroy_witness()) + } + + fn destroy( + collection: Self::CollectionId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Self::do_destroy_collection(collection, witness, maybe_check_owner) + } +} + 
+impl, I: 'static> Mutate<::AccountId, ItemConfig> for Pallet { + fn mint_into( + collection: &Self::CollectionId, + item: &Self::ItemId, + who: &T::AccountId, + item_config: &ItemConfig, + deposit_collection_owner: bool, + ) -> DispatchResult { + Self::do_mint( + *collection, + *item, + who.clone(), + who.clone(), + *item_config, + deposit_collection_owner, + |_, _| Ok(()), + ) + } + + fn burn( + collection: &Self::CollectionId, + item: &Self::ItemId, + maybe_check_owner: Option<&T::AccountId>, + ) -> DispatchResult { + Self::do_burn(*collection, *item, |d| { + if let Some(check_owner) = maybe_check_owner { + if &d.owner != check_owner { + return Err(Error::::NoPermission.into()) + } + } + Ok(()) + }) + } + + fn set_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &[u8], + value: &[u8], + ) -> DispatchResult { + Self::do_force_set_attribute( + None, + *collection, + Some(*item), + AttributeNamespace::Pallet, + Self::construct_attribute_key(key.to_vec())?, + Self::construct_attribute_value(value.to_vec())?, + ) + } + + fn set_typed_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| { + value.using_encoded(|v| { + >::set_attribute(collection, item, k, v) + }) + }) + } + + fn set_collection_attribute( + collection: &Self::CollectionId, + key: &[u8], + value: &[u8], + ) -> DispatchResult { + Self::do_force_set_attribute( + None, + *collection, + None, + AttributeNamespace::Pallet, + Self::construct_attribute_key(key.to_vec())?, + Self::construct_attribute_value(value.to_vec())?, + ) + } + + fn set_typed_collection_attribute( + collection: &Self::CollectionId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| { + value.using_encoded(|v| { + >::set_collection_attribute( + collection, k, v, + ) + }) + }) + } +} + +impl, I: 'static> Transfer for Pallet { + fn transfer( + collection: &Self::CollectionId, + item: &Self::ItemId, + 
destination: &T::AccountId, + ) -> DispatchResult { + Self::do_transfer(*collection, *item, destination.clone(), |_, _| Ok(())) + } +} + +impl, I: 'static> InspectEnumerable for Pallet { + type CollectionsIterator = KeyPrefixIterator<>::CollectionId>; + type ItemsIterator = KeyPrefixIterator<>::ItemId>; + type OwnedIterator = + KeyPrefixIterator<(>::CollectionId, >::ItemId)>; + type OwnedInCollectionIterator = KeyPrefixIterator<>::ItemId>; + + /// Returns an iterator of the collections in existence. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn collections() -> Self::CollectionsIterator { + Collection::::iter_keys() + } + + /// Returns an iterator of the items of a `collection` in existence. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn items(collection: &Self::CollectionId) -> Self::ItemsIterator { + Item::::iter_key_prefix(collection) + } + + /// Returns an iterator of the items of all collections owned by `who`. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn owned(who: &T::AccountId) -> Self::OwnedIterator { + Account::::iter_key_prefix((who,)) + } + + /// Returns an iterator of the items of `collection` owned by `who`. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn owned_in_collection( + collection: &Self::CollectionId, + who: &T::AccountId, + ) -> Self::OwnedInCollectionIterator { + Account::::iter_key_prefix((who, collection)) + } +} diff --git a/frame/nfts/src/lib.rs b/frame/nfts/src/lib.rs new file mode 100644 index 0000000000000..2006d78959c4d --- /dev/null +++ b/frame/nfts/src/lib.rs @@ -0,0 +1,1769 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Nfts Module +//! +//! A simple, secure module for dealing with non-fungible items. +//! +//! ## Related Modules +//! +//! * [`System`](../frame_system/index.html) +//! * [`Support`](../frame_support/index.html) + +#![recursion_limit = "256"] +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +pub mod mock; +#[cfg(test)] +mod tests; + +mod common_functions; +mod features; +mod impl_nonfungibles; +mod types; + +pub mod macros; +pub mod weights; + +use codec::{Decode, Encode}; +use frame_support::traits::{ + tokens::{AttributeNamespace, Locker}, + BalanceStatus::Reserved, + Currency, EnsureOriginWithArg, ReservableCurrency, +}; +use frame_system::Config as SystemConfig; +use sp_runtime::{ + traits::{Saturating, StaticLookup, Zero}, + RuntimeDebug, +}; +use sp_std::prelude::*; + +pub use pallet::*; +pub use types::*; +pub use weights::WeightInfo; + +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[cfg(feature = "runtime-benchmarks")] + pub trait BenchmarkHelper { + fn collection(i: u16) -> CollectionId; + fn item(i: u16) -> ItemId; + } + #[cfg(feature = "runtime-benchmarks")] + impl, ItemId: From> BenchmarkHelper for () { + fn collection(i: 
u16) -> CollectionId { + i.into() + } + fn item(i: u16) -> ItemId { + i.into() + } + } + + #[pallet::config] + /// The module configuration trait. + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + + /// Identifier for the collection of item. + type CollectionId: Member + Parameter + MaxEncodedLen + Copy + Incrementable; + + /// The type used to identify a unique item within a collection. + type ItemId: Member + Parameter + MaxEncodedLen + Copy; + + /// The currency mechanism, used for paying for reserves. + type Currency: ReservableCurrency; + + /// The origin which may forcibly create or destroy an item or otherwise alter privileged + /// attributes. + type ForceOrigin: EnsureOrigin; + + /// Standard collection creation is only allowed if the origin attempting it and the + /// collection are in this set. + type CreateOrigin: EnsureOriginWithArg< + Self::RuntimeOrigin, + Self::CollectionId, + Success = Self::AccountId, + >; + + /// Locker trait to enable Locking mechanism downstream. + type Locker: Locker; + + /// The basic amount of funds that must be reserved for collection. + #[pallet::constant] + type CollectionDeposit: Get>; + + /// The basic amount of funds that must be reserved for an item. + #[pallet::constant] + type ItemDeposit: Get>; + + /// The basic amount of funds that must be reserved when adding metadata to your item. + #[pallet::constant] + type MetadataDepositBase: Get>; + + /// The basic amount of funds that must be reserved when adding an attribute to an item. + #[pallet::constant] + type AttributeDepositBase: Get>; + + /// The additional funds that must be reserved for the number of bytes store in metadata, + /// either "normal" metadata or attribute metadata. + #[pallet::constant] + type DepositPerByte: Get>; + + /// The maximum length of data stored on-chain. + #[pallet::constant] + type StringLimit: Get; + + /// The maximum length of an attribute key. 
+ #[pallet::constant] + type KeyLimit: Get; + + /// The maximum length of an attribute value. + #[pallet::constant] + type ValueLimit: Get; + + /// The maximum approvals an item could have. + #[pallet::constant] + type ApprovalsLimit: Get; + + /// The maximum attributes approvals an item could have. + #[pallet::constant] + type ItemAttributesApprovalsLimit: Get; + + /// The max number of tips a user could send. + #[pallet::constant] + type MaxTips: Get; + + /// The max duration in blocks for deadlines. + #[pallet::constant] + type MaxDeadlineDuration: Get<::BlockNumber>; + + /// Disables some of pallet's features. + #[pallet::constant] + type Features: Get; + + #[cfg(feature = "runtime-benchmarks")] + /// A set of helper functions for benchmarking. + type Helper: BenchmarkHelper; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + /// Details of a collection. + #[pallet::storage] + pub(super) type Collection, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::CollectionId, + CollectionDetails>, + >; + + /// The collection, if any, of which an account is willing to take ownership. + #[pallet::storage] + pub(super) type OwnershipAcceptance, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::AccountId, T::CollectionId>; + + /// The items held by any given account; set out this way so that items owned by a single + /// account can be enumerated. + #[pallet::storage] + pub(super) type Account, I: 'static = ()> = StorageNMap< + _, + ( + NMapKey, // owner + NMapKey, + NMapKey, + ), + (), + OptionQuery, + >; + + /// The collections owned by any given account; set out this way so that collections owned by + /// a single account can be enumerated. + #[pallet::storage] + pub(super) type CollectionAccount, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Blake2_128Concat, + T::CollectionId, + (), + OptionQuery, + >; + + /// The items in existence and their ownership details. 
+ #[pallet::storage] + /// Stores collection roles as per account. + pub(super) type CollectionRoleOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::AccountId, + CollectionRoles, + OptionQuery, + >; + + /// The items in existence and their ownership details. + #[pallet::storage] + pub(super) type Item, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemDetails, ApprovalsOf>, + OptionQuery, + >; + + /// Metadata of a collection. + #[pallet::storage] + pub(super) type CollectionMetadataOf, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::CollectionId, + CollectionMetadata, T::StringLimit>, + OptionQuery, + >; + + /// Metadata of an item. + #[pallet::storage] + pub(super) type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemMetadata, T::StringLimit>, + OptionQuery, + >; + + /// Attributes of a collection. + #[pallet::storage] + pub(super) type Attribute, I: 'static = ()> = StorageNMap< + _, + ( + NMapKey, + NMapKey>, + NMapKey>, + NMapKey>, + ), + (BoundedVec, AttributeDepositOf), + OptionQuery, + >; + + /// A price of an item. + #[pallet::storage] + pub(super) type ItemPriceOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + (ItemPrice, Option), + OptionQuery, + >; + + /// Item attribute approvals. + #[pallet::storage] + pub(super) type ItemAttributesApprovalsOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemAttributesApprovals, + ValueQuery, + >; + + /// Stores the `CollectionId` that is going to be used for the next collection. + /// This gets incremented whenever a new collection is created. 
+ #[pallet::storage] + pub(super) type NextCollectionId, I: 'static = ()> = + StorageValue<_, T::CollectionId, OptionQuery>; + + /// Handles all the pending swaps. + #[pallet::storage] + pub(super) type PendingSwapOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + PendingSwap< + T::CollectionId, + T::ItemId, + PriceWithDirection>, + ::BlockNumber, + >, + OptionQuery, + >; + + /// Config of a collection. + #[pallet::storage] + pub(super) type CollectionConfigOf, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::CollectionId, CollectionConfigFor, OptionQuery>; + + /// Config of an item. + #[pallet::storage] + pub(super) type ItemConfigOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::CollectionId, + Blake2_128Concat, + T::ItemId, + ItemConfig, + OptionQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// A `collection` was created. + Created { collection: T::CollectionId, creator: T::AccountId, owner: T::AccountId }, + /// A `collection` was force-created. + ForceCreated { collection: T::CollectionId, owner: T::AccountId }, + /// A `collection` was destroyed. + Destroyed { collection: T::CollectionId }, + /// An `item` was issued. + Issued { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, + /// An `item` was transferred. + Transferred { + collection: T::CollectionId, + item: T::ItemId, + from: T::AccountId, + to: T::AccountId, + }, + /// An `item` was destroyed. + Burned { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, + /// An `item` became non-transferable. + ItemTransferLocked { collection: T::CollectionId, item: T::ItemId }, + /// An `item` became transferable. + ItemTransferUnlocked { collection: T::CollectionId, item: T::ItemId }, + /// `item` metadata or attributes were locked. 
+ ItemPropertiesLocked { + collection: T::CollectionId, + item: T::ItemId, + lock_metadata: bool, + lock_attributes: bool, + }, + /// Some `collection` was locked. + CollectionLocked { collection: T::CollectionId }, + /// The owner changed. + OwnerChanged { collection: T::CollectionId, new_owner: T::AccountId }, + /// The management team changed. + TeamChanged { + collection: T::CollectionId, + issuer: T::AccountId, + admin: T::AccountId, + freezer: T::AccountId, + }, + /// An `item` of a `collection` has been approved by the `owner` for transfer by + /// a `delegate`. + TransferApproved { + collection: T::CollectionId, + item: T::ItemId, + owner: T::AccountId, + delegate: T::AccountId, + deadline: Option<::BlockNumber>, + }, + /// An approval for a `delegate` account to transfer the `item` of an item + /// `collection` was cancelled by its `owner`. + ApprovalCancelled { + collection: T::CollectionId, + item: T::ItemId, + owner: T::AccountId, + delegate: T::AccountId, + }, + /// All approvals of an item got cancelled. + AllApprovalsCancelled { collection: T::CollectionId, item: T::ItemId, owner: T::AccountId }, + /// A `collection` has had its config changed by the `Force` origin. + CollectionConfigChanged { collection: T::CollectionId }, + /// New metadata has been set for a `collection`. + CollectionMetadataSet { collection: T::CollectionId, data: BoundedVec }, + /// Metadata has been cleared for a `collection`. + CollectionMetadataCleared { collection: T::CollectionId }, + /// New metadata has been set for an item. + ItemMetadataSet { + collection: T::CollectionId, + item: T::ItemId, + data: BoundedVec, + }, + /// Metadata has been cleared for an item. + ItemMetadataCleared { collection: T::CollectionId, item: T::ItemId }, + /// The deposit for a set of `item`s within a `collection` has been updated. + Redeposited { collection: T::CollectionId, successful_items: Vec }, + /// New attribute metadata has been set for a `collection` or `item`. 
+ AttributeSet { + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + value: BoundedVec, + namespace: AttributeNamespace, + }, + /// Attribute metadata has been cleared for a `collection` or `item`. + AttributeCleared { + collection: T::CollectionId, + maybe_item: Option, + key: BoundedVec, + namespace: AttributeNamespace, + }, + /// A new approval to modify item attributes was added. + ItemAttributesApprovalAdded { + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + }, + /// A new approval to modify item attributes was removed. + ItemAttributesApprovalRemoved { + collection: T::CollectionId, + item: T::ItemId, + delegate: T::AccountId, + }, + /// Ownership acceptance has changed for an account. + OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, + /// Max supply has been set for a collection. + CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, + /// Mint settings for a collection had changed. + CollectionMintSettingsUpdated { collection: T::CollectionId }, + /// Event gets emitted when the `NextCollectionId` gets incremented. + NextCollectionIdIncremented { next_id: T::CollectionId }, + /// The price was set for the item. + ItemPriceSet { + collection: T::CollectionId, + item: T::ItemId, + price: ItemPrice, + whitelisted_buyer: Option, + }, + /// The price for the item was removed. + ItemPriceRemoved { collection: T::CollectionId, item: T::ItemId }, + /// An item was bought. + ItemBought { + collection: T::CollectionId, + item: T::ItemId, + price: ItemPrice, + seller: T::AccountId, + buyer: T::AccountId, + }, + /// A tip was sent. + TipSent { + collection: T::CollectionId, + item: T::ItemId, + sender: T::AccountId, + receiver: T::AccountId, + amount: DepositBalanceOf, + }, + /// An `item` swap intent was created. 
+ SwapCreated { + offered_collection: T::CollectionId, + offered_item: T::ItemId, + desired_collection: T::CollectionId, + desired_item: Option, + price: Option>>, + deadline: ::BlockNumber, + }, + /// The swap was cancelled. + SwapCancelled { + offered_collection: T::CollectionId, + offered_item: T::ItemId, + desired_collection: T::CollectionId, + desired_item: Option, + price: Option>>, + deadline: ::BlockNumber, + }, + /// The swap has been claimed. + SwapClaimed { + sent_collection: T::CollectionId, + sent_item: T::ItemId, + sent_item_owner: T::AccountId, + received_collection: T::CollectionId, + received_item: T::ItemId, + received_item_owner: T::AccountId, + price: Option>>, + deadline: ::BlockNumber, + }, + } + + #[pallet::error] + pub enum Error { + /// The signing account has no permission to do the operation. + NoPermission, + /// The given item ID is unknown. + UnknownCollection, + /// The item ID has already been used for an item. + AlreadyExists, + /// The approval had a deadline that expired, so the approval isn't valid anymore. + ApprovalExpired, + /// The owner turned out to be different to what was expected. + WrongOwner, + /// The witness data given does not match the current state of the chain. + BadWitness, + /// Collection ID is already taken. + CollectionIdInUse, + /// Items within that collection are non-transferable. + ItemsNonTransferable, + /// The provided account is not a delegate. + NotDelegate, + /// The delegate turned out to be different to what was expected. + WrongDelegate, + /// No approval exists that would allow the transfer. + Unapproved, + /// The named owner has not signed ownership acceptance of the collection. + Unaccepted, + /// The item is locked (non-transferable). + ItemLocked, + /// Item's attributes are locked. + LockedItemAttributes, + /// Collection's attributes are locked. + LockedCollectionAttributes, + /// Item's metadata is locked. + LockedItemMetadata, + /// Collection's metadata is locked. 
+ LockedCollectionMetadata, + /// All items have been minted. + MaxSupplyReached, + /// The max supply is locked and can't be changed. + MaxSupplyLocked, + /// The provided max supply is less than the number of items a collection already has. + MaxSupplyTooSmall, + /// The given item ID is unknown. + UnknownItem, + /// Swap doesn't exist. + UnknownSwap, + /// Item is not for sale. + NotForSale, + /// The provided bid is too low. + BidTooLow, + /// The item has reached its approval limit. + ReachedApprovalLimit, + /// The deadline has already expired. + DeadlineExpired, + /// The duration provided should be less than or equal to `MaxDeadlineDuration`. + WrongDuration, + /// The method is disabled by system settings. + MethodDisabled, + /// The provided setting can't be set. + WrongSetting, + /// Item's config already exists and should be equal to the provided one. + InconsistentItemConfig, + /// Config for a collection or an item can't be found. + NoConfig, + /// Some roles were not cleared. + RolesNotCleared, + /// Mint has not started yet. + MintNotStarted, + /// Mint has already ended. + MintEnded, + /// The provided Item was already used for claiming. + AlreadyClaimed, + /// The provided data is incorrect. + IncorrectData, + } + + #[pallet::call] + impl, I: 'static> Pallet { + /// Issue a new collection of non-fungible items from a public origin. + /// + /// This new collection has no items initially and its owner is the origin. + /// + /// The origin must be Signed and the sender must have sufficient funds free. + /// + /// `ItemDeposit` funds of sender are reserved. + /// + /// Parameters: + /// - `admin`: The admin of this collection. The admin is the initial address of each + /// member of the collection's admin team. + /// + /// Emits `Created` event when successful. 
+ /// + /// Weight: `O(1)` + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::create())] + pub fn create( + origin: OriginFor, + admin: AccountIdLookupOf, + config: CollectionConfigFor, + ) -> DispatchResult { + let collection = + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + + let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; + let admin = T::Lookup::lookup(admin)?; + + // DepositRequired can be disabled by calling the force_create() only + ensure!( + !config.has_disabled_setting(CollectionSetting::DepositRequired), + Error::::WrongSetting + ); + + Self::do_create_collection( + collection, + owner.clone(), + admin.clone(), + config, + T::CollectionDeposit::get(), + Event::Created { collection, creator: owner, owner: admin }, + ) + } + + /// Issue a new collection of non-fungible items from a privileged origin. + /// + /// This new collection has no items initially. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// Unlike `create`, no funds are reserved. + /// + /// - `owner`: The owner of this collection of items. The owner has full superuser + /// permissions over this item, but may later change and configure the permissions using + /// `transfer_ownership` and `set_team`. + /// + /// Emits `ForceCreated` event when successful. + /// + /// Weight: `O(1)` + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::force_create())] + pub fn force_create( + origin: OriginFor, + owner: AccountIdLookupOf, + config: CollectionConfigFor, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + let owner = T::Lookup::lookup(owner)?; + + let collection = + NextCollectionId::::get().unwrap_or(T::CollectionId::initial_value()); + + Self::do_create_collection( + collection, + owner.clone(), + owner.clone(), + config, + Zero::zero(), + Event::ForceCreated { collection, owner }, + ) + } + + /// Destroy a collection of fungible items. 
+ /// + /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the + /// owner of the `collection`. + /// + /// - `collection`: The identifier of the collection to be destroyed. + /// - `witness`: Information on the items minted in the collection. This must be + /// correct. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(n + m)` where: + /// - `n = witness.items` + /// - `m = witness.item_metadatas` + /// - `a = witness.attributes` + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::destroy( + witness.items, + witness.item_metadatas, + witness.attributes, + ))] + pub fn destroy( + origin: OriginFor, + collection: T::CollectionId, + witness: DestroyWitness, + ) -> DispatchResultWithPostInfo { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + let details = Self::do_destroy_collection(collection, witness, maybe_check_owner)?; + + Ok(Some(T::WeightInfo::destroy( + details.items, + details.item_metadatas, + details.attributes, + )) + .into()) + } + + /// Mint an item of a particular collection. + /// + /// The origin must be Signed and the sender must be the Issuer of the `collection`. + /// + /// - `collection`: The collection of the item to be minted. + /// - `item`: An identifier of the new item. + /// - `mint_to`: Account into which the item will be minted. + /// - `witness_data`: When the mint type is `HolderOf(collection_id)`, then the owned + /// item_id from that collection needs to be provided within the witness data object. + /// + /// Note: the deposit will be taken from the `origin` and not the `owner` of the `item`. + /// + /// Emits `Issued` event when successful. 
+ /// + /// Weight: `O(1)` + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::mint())] + pub fn mint( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + mint_to: AccountIdLookupOf, + witness_data: Option>, + ) -> DispatchResult { + let caller = ensure_signed(origin)?; + let mint_to = T::Lookup::lookup(mint_to)?; + + let collection_config = Self::get_collection_config(&collection)?; + let item_settings = collection_config.mint_settings.default_item_settings; + let item_config = ItemConfig { settings: item_settings }; + + Self::do_mint( + collection, + item, + caller.clone(), + mint_to.clone(), + item_config, + false, + |collection_details, collection_config| { + // Issuer can mint regardless of mint settings + if Self::has_role(&collection, &caller, CollectionRole::Issuer) { + return Ok(()) + } + + let mint_settings = collection_config.mint_settings; + let now = frame_system::Pallet::::block_number(); + + if let Some(start_block) = mint_settings.start_block { + ensure!(start_block <= now, Error::::MintNotStarted); + } + if let Some(end_block) = mint_settings.end_block { + ensure!(end_block >= now, Error::::MintEnded); + } + + match mint_settings.mint_type { + MintType::Issuer => return Err(Error::::NoPermission.into()), + MintType::HolderOf(collection_id) => { + let MintWitness { owner_of_item } = + witness_data.ok_or(Error::::BadWitness)?; + + let has_item = Account::::contains_key(( + &caller, + &collection_id, + &owner_of_item, + )); + ensure!(has_item, Error::::BadWitness); + + let attribute_key = Self::construct_attribute_key( + PalletAttributes::::UsedToClaim(collection) + .encode(), + )?; + + let key = ( + &collection_id, + Some(owner_of_item), + AttributeNamespace::Pallet, + &attribute_key, + ); + let already_claimed = Attribute::::contains_key(key.clone()); + ensure!(!already_claimed, Error::::AlreadyClaimed); + + let value = Self::construct_attribute_value(vec![0])?; + Attribute::::insert( + key, + (value, AttributeDeposit { 
account: None, amount: Zero::zero() }), + ); + }, + _ => {}, + } + + if let Some(price) = mint_settings.price { + T::Currency::transfer( + &caller, + &collection_details.owner, + price, + ExistenceRequirement::KeepAlive, + )?; + } + + Ok(()) + }, + ) + } + + /// Mint an item of a particular collection from a privileged origin. + /// + /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the + /// Issuer of the `collection`. + /// + /// - `collection`: The collection of the item to be minted. + /// - `item`: An identifier of the new item. + /// - `mint_to`: Account into which the item will be minted. + /// - `item_config`: A config of the new item. + /// + /// Emits `Issued` event when successful. + /// + /// Weight: `O(1)` + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::force_mint())] + pub fn force_mint( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + mint_to: AccountIdLookupOf, + item_config: ItemConfig, + ) -> DispatchResult { + let maybe_check_origin = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + let mint_to = T::Lookup::lookup(mint_to)?; + + if let Some(check_origin) = maybe_check_origin { + ensure!( + Self::has_role(&collection, &check_origin, CollectionRole::Issuer), + Error::::NoPermission + ); + } + Self::do_mint(collection, item, mint_to.clone(), mint_to, item_config, true, |_, _| { + Ok(()) + }) + } + + /// Destroy a single item. + /// + /// Origin must be Signed and the sender should be the Admin of the `collection`. + /// + /// - `collection`: The collection of the item to be burned. + /// - `item`: The item to be burned. + /// - `check_owner`: If `Some` then the operation will fail with `WrongOwner` unless the + /// item is owned by this value. + /// + /// Emits `Burned` with the actual amount burned. + /// + /// Weight: `O(1)` + /// Modes: `check_owner.is_some()`. 
+ #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::burn())] + pub fn burn( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + check_owner: Option>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let check_owner = check_owner.map(T::Lookup::lookup).transpose()?; + + Self::do_burn(collection, item, |details| { + let is_admin = Self::has_role(&collection, &origin, CollectionRole::Admin); + let is_permitted = is_admin || details.owner == origin; + ensure!(is_permitted, Error::::NoPermission); + ensure!( + check_owner.map_or(true, |o| o == details.owner), + Error::::WrongOwner + ); + Ok(()) + }) + } + + /// Move an item from the sender account to another. + /// + /// Origin must be Signed and the signing account must be either: + /// - the Admin of the `collection`; + /// - the Owner of the `item`; + /// - the approved delegate for the `item` (in this case, the approval is reset). + /// + /// Arguments: + /// - `collection`: The collection of the item to be transferred. + /// - `item`: The item to be transferred. + /// - `dest`: The account to receive ownership of the item. + /// + /// Emits `Transferred`. + /// + /// Weight: `O(1)` + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::transfer())] + pub fn transfer( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + dest: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + + Self::do_transfer(collection, item, dest, |_, details| { + let is_admin = Self::has_role(&collection, &origin, CollectionRole::Admin); + if details.owner != origin && !is_admin { + let deadline = + details.approvals.get(&origin).ok_or(Error::::NoPermission)?; + if let Some(d) = deadline { + let block_number = frame_system::Pallet::::block_number(); + ensure!(block_number <= *d, Error::::ApprovalExpired); + } + } + Ok(()) + }) + } + + /// Re-evaluate the deposits on some items. 
+ /// + /// Origin must be Signed and the sender should be the Owner of the `collection`. + /// + /// - `collection`: The collection of the items to be reevaluated. + /// - `items`: The items of the collection whose deposits will be reevaluated. + /// + /// NOTE: This exists as a best-effort function. Any items which are unknown or + /// in the case that the owner account does not have reservable funds to pay for a + /// deposit increase are ignored. Generally the owner isn't going to call this on items + /// whose existing deposit is less than the refreshed deposit as it would only cost them, + /// so it's of little consequence. + /// + /// It will still return an error in the case that the collection is unknown or the signer + /// is not permitted to call it. + /// + /// Weight: `O(items.len())` + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::redeposit(items.len() as u32))] + pub fn redeposit( + origin: OriginFor, + collection: T::CollectionId, + items: Vec, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let collection_details = + Collection::::get(&collection).ok_or(Error::::UnknownCollection)?; + ensure!(collection_details.owner == origin, Error::::NoPermission); + + let config = Self::get_collection_config(&collection)?; + let deposit = match config.is_setting_enabled(CollectionSetting::DepositRequired) { + true => T::ItemDeposit::get(), + false => Zero::zero(), + }; + + let mut successful = Vec::with_capacity(items.len()); + for item in items.into_iter() { + let mut details = match Item::::get(&collection, &item) { + Some(x) => x, + None => continue, + }; + let old = details.deposit.amount; + if old > deposit { + T::Currency::unreserve(&details.deposit.account, old - deposit); + } else if deposit > old { + if T::Currency::reserve(&details.deposit.account, deposit - old).is_err() { + // NOTE: No alterations made to collection_details in this iteration so far, + // so this is OK to do. 
+ continue + } + } else { + continue + } + details.deposit.amount = deposit; + Item::::insert(&collection, &item, &details); + successful.push(item); + } + + Self::deposit_event(Event::::Redeposited { + collection, + successful_items: successful, + }); + + Ok(()) + } + + /// Disallow further unprivileged transfer of an item. + /// + /// Origin must be Signed and the sender should be the Freezer of the `collection`. + /// + /// - `collection`: The collection of the item to be changed. + /// - `item`: The item to become non-transferable. + /// + /// Emits `ItemTransferLocked`. + /// + /// Weight: `O(1)` + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::lock_item_transfer())] + pub fn lock_item_transfer( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_lock_item_transfer(origin, collection, item) + } + + /// Re-allow unprivileged transfer of an item. + /// + /// Origin must be Signed and the sender should be the Freezer of the `collection`. + /// + /// - `collection`: The collection of the item to be changed. + /// - `item`: The item to become transferable. + /// + /// Emits `ItemTransferUnlocked`. + /// + /// Weight: `O(1)` + #[pallet::call_index(9)] + #[pallet::weight(T::WeightInfo::unlock_item_transfer())] + pub fn unlock_item_transfer( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_unlock_item_transfer(origin, collection, item) + } + + /// Disallows specified settings for the whole collection. + /// + /// Origin must be Signed and the sender should be the Freezer of the `collection`. + /// + /// - `collection`: The collection to be locked. + /// - `lock_settings`: The settings to be locked. + /// + /// Note: it's possible to only lock(set) the setting, but not to unset it. + /// Emits `CollectionLocked`. 
+ /// + /// Weight: `O(1)` + #[pallet::call_index(10)] + #[pallet::weight(T::WeightInfo::lock_collection())] + pub fn lock_collection( + origin: OriginFor, + collection: T::CollectionId, + lock_settings: CollectionSettings, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_lock_collection(origin, collection, lock_settings) + } + + /// Change the Owner of a collection. + /// + /// Origin must be Signed and the sender should be the Owner of the `collection`. + /// + /// - `collection`: The collection whose owner should be changed. + /// - `owner`: The new Owner of this collection. They must have called + /// `set_accept_ownership` with `collection` in order for this operation to succeed. + /// + /// Emits `OwnerChanged`. + /// + /// Weight: `O(1)` + #[pallet::call_index(11)] + #[pallet::weight(T::WeightInfo::transfer_ownership())] + pub fn transfer_ownership( + origin: OriginFor, + collection: T::CollectionId, + owner: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + Self::do_transfer_ownership(origin, collection, owner) + } + + /// Change the Issuer, Admin and Freezer of a collection. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. + /// + /// - `collection`: The collection whose team should be changed. + /// - `issuer`: The new Issuer of this collection. + /// - `admin`: The new Admin of this collection. + /// - `freezer`: The new Freezer of this collection. + /// + /// Emits `TeamChanged`. 
+ /// + /// Weight: `O(1)` + #[pallet::call_index(12)] + #[pallet::weight(T::WeightInfo::set_team())] + pub fn set_team( + origin: OriginFor, + collection: T::CollectionId, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + let issuer = T::Lookup::lookup(issuer)?; + let admin = T::Lookup::lookup(admin)?; + let freezer = T::Lookup::lookup(freezer)?; + Self::do_set_team(maybe_check_owner, collection, issuer, admin, freezer) + } + + /// Change the Owner of a collection. + /// + /// Origin must be `ForceOrigin`. + /// + /// - `collection`: The identifier of the collection. + /// - `owner`: The new Owner of this collection. + /// + /// Emits `OwnerChanged`. + /// + /// Weight: `O(1)` + #[pallet::call_index(13)] + #[pallet::weight(T::WeightInfo::force_collection_owner())] + pub fn force_collection_owner( + origin: OriginFor, + collection: T::CollectionId, + owner: AccountIdLookupOf, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + let new_owner = T::Lookup::lookup(owner)?; + Self::do_force_collection_owner(collection, new_owner) + } + + /// Change the config of a collection. + /// + /// Origin must be `ForceOrigin`. + /// + /// - `collection`: The identifier of the collection. + /// - `config`: The new config of this collection. + /// + /// Emits `CollectionConfigChanged`. + /// + /// Weight: `O(1)` + #[pallet::call_index(14)] + #[pallet::weight(T::WeightInfo::force_collection_config())] + pub fn force_collection_config( + origin: OriginFor, + collection: T::CollectionId, + config: CollectionConfigFor, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + Self::do_force_collection_config(collection, config) + } + + /// Approve an item to be transferred by a delegated third-party account. 
+ /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `item`. + /// + /// - `collection`: The collection of the item to be approved for delegated transfer. + /// - `item`: The item to be approved for delegated transfer. + /// - `delegate`: The account to delegate permission to transfer the item. + /// - `maybe_deadline`: Optional deadline for the approval. Specified by providing the + /// number of blocks after which the approval will expire + /// + /// Emits `TransferApproved` on success. + /// + /// Weight: `O(1)` + #[pallet::call_index(15)] + #[pallet::weight(T::WeightInfo::approve_transfer())] + pub fn approve_transfer( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + delegate: AccountIdLookupOf, + maybe_deadline: Option<::BlockNumber>, + ) -> DispatchResult { + let maybe_check_origin = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + let delegate = T::Lookup::lookup(delegate)?; + Self::do_approve_transfer( + maybe_check_origin, + collection, + item, + delegate, + maybe_deadline, + ) + } + + /// Cancel one of the transfer approvals for a specific item. + /// + /// Origin must be either: + /// - the `Force` origin; + /// - `Signed` with the signer being the Admin of the `collection`; + /// - `Signed` with the signer being the Owner of the `item`; + /// + /// Arguments: + /// - `collection`: The collection of the item of whose approval will be cancelled. + /// - `item`: The item of the collection of whose approval will be cancelled. + /// - `delegate`: The account that is going to loose their approval. + /// + /// Emits `ApprovalCancelled` on success. 
+ /// + /// Weight: `O(1)` + #[pallet::call_index(16)] + #[pallet::weight(T::WeightInfo::cancel_approval())] + pub fn cancel_approval( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + delegate: AccountIdLookupOf, + ) -> DispatchResult { + let maybe_check_origin = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + let delegate = T::Lookup::lookup(delegate)?; + Self::do_cancel_approval(maybe_check_origin, collection, item, delegate) + } + + /// Cancel all the approvals of a specific item. + /// + /// Origin must be either: + /// - the `Force` origin; + /// - `Signed` with the signer being the Admin of the `collection`; + /// - `Signed` with the signer being the Owner of the `item`; + /// + /// Arguments: + /// - `collection`: The collection of the item of whose approvals will be cleared. + /// - `item`: The item of the collection of whose approvals will be cleared. + /// + /// Emits `AllApprovalsCancelled` on success. + /// + /// Weight: `O(1)` + #[pallet::call_index(17)] + #[pallet::weight(T::WeightInfo::clear_all_transfer_approvals())] + pub fn clear_all_transfer_approvals( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let maybe_check_origin = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_clear_all_transfer_approvals(maybe_check_origin, collection, item) + } + + /// Disallows changing the metadata or attributes of the item. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. + /// + /// - `collection`: The collection if the `item`. + /// - `item`: An item to be locked. + /// - `lock_metadata`: Specifies whether the metadata should be locked. 
+ /// - `lock_attributes`: Specifies whether the attributes in the `CollectionOwner` namespace + /// should be locked. + /// + /// Note: `lock_attributes` affects the attributes in the `CollectionOwner` namespace + /// only. When the metadata or attributes are locked, it won't be possible the unlock them. + /// + /// Emits `ItemPropertiesLocked`. + /// + /// Weight: `O(1)` + #[pallet::call_index(18)] + #[pallet::weight(T::WeightInfo::lock_item_properties())] + pub fn lock_item_properties( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + lock_metadata: bool, + lock_attributes: bool, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_lock_item_properties( + maybe_check_owner, + collection, + item, + lock_metadata, + lock_attributes, + ) + } + + /// Set an attribute for a collection or item. + /// + /// Origin must be Signed and must conform to the namespace ruleset: + /// - `CollectionOwner` namespace could be modified by the `collection` owner only; + /// - `ItemOwner` namespace could be modified by the `maybe_item` owner only. `maybe_item` + /// should be set in that case; + /// - `Account(AccountId)` namespace could be modified only when the `origin` was given a + /// permission to do so; + /// + /// The funds of `origin` are reserved according to the formula: + /// `AttributeDepositBase + DepositPerByte * (key.len + value.len)` taking into + /// account any already reserved funds. + /// + /// - `collection`: The identifier of the collection whose item's metadata to set. + /// - `maybe_item`: The identifier of the item whose metadata to set. + /// - `namespace`: Attribute's namespace. + /// - `key`: The key of the attribute. + /// - `value`: The value to which to set the attribute. + /// + /// Emits `AttributeSet`. 
+ /// + /// Weight: `O(1)` + #[pallet::call_index(19)] + #[pallet::weight(T::WeightInfo::set_attribute())] + pub fn set_attribute( + origin: OriginFor, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + value: BoundedVec, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_set_attribute(origin, collection, maybe_item, namespace, key, value) + } + + /// Force-set an attribute for a collection or item. + /// + /// Origin must be `ForceOrigin`. + /// + /// If the attribute already exists and it was set by another account, the deposit + /// will be returned to the previous owner. + /// + /// - `set_as`: An optional owner of the attribute. + /// - `collection`: The identifier of the collection whose item's metadata to set. + /// - `maybe_item`: The identifier of the item whose metadata to set. + /// - `namespace`: Attribute's namespace. + /// - `key`: The key of the attribute. + /// - `value`: The value to which to set the attribute. + /// + /// Emits `AttributeSet`. + /// + /// Weight: `O(1)` + #[pallet::call_index(20)] + #[pallet::weight(T::WeightInfo::force_set_attribute())] + pub fn force_set_attribute( + origin: OriginFor, + set_as: Option, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + value: BoundedVec, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + Self::do_force_set_attribute(set_as, collection, maybe_item, namespace, key, value) + } + + /// Clear an attribute for a collection or item. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. + /// + /// Any deposit is freed for the collection's owner. + /// + /// - `collection`: The identifier of the collection whose item's metadata to clear. + /// - `maybe_item`: The identifier of the item whose metadata to clear. + /// - `namespace`: Attribute's namespace. 
+ /// - `key`: The key of the attribute. + /// + /// Emits `AttributeCleared`. + /// + /// Weight: `O(1)` + #[pallet::call_index(21)] + #[pallet::weight(T::WeightInfo::clear_attribute())] + pub fn clear_attribute( + origin: OriginFor, + collection: T::CollectionId, + maybe_item: Option, + namespace: AttributeNamespace, + key: BoundedVec, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_clear_attribute(maybe_check_owner, collection, maybe_item, namespace, key) + } + + /// Approve item's attributes to be changed by a delegated third-party account. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `collection`: A collection of the item. + /// - `item`: The item that holds attributes. + /// - `delegate`: The account to delegate permission to change attributes of the item. + /// + /// Emits `ItemAttributesApprovalAdded` on success. + #[pallet::call_index(22)] + #[pallet::weight(T::WeightInfo::approve_item_attributes())] + pub fn approve_item_attributes( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + delegate: AccountIdLookupOf, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + Self::do_approve_item_attributes(origin, collection, item, delegate) + } + + /// Cancel the previously provided approval to change item's attributes. + /// All the previously set attributes by the `delegate` will be removed. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `collection`: Collection that the item is contained within. + /// - `item`: The item that holds attributes. + /// - `delegate`: The previously approved account to remove. + /// + /// Emits `ItemAttributesApprovalRemoved` on success. 
+ #[pallet::call_index(23)] + #[pallet::weight(T::WeightInfo::cancel_item_attributes_approval( + witness.account_attributes + ))] + pub fn cancel_item_attributes_approval( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + delegate: AccountIdLookupOf, + witness: CancelAttributesApprovalWitness, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + Self::do_cancel_item_attributes_approval(origin, collection, item, delegate, witness) + } + + /// Set the metadata for an item. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. + /// + /// If the origin is Signed, then funds of signer are reserved according to the formula: + /// `MetadataDepositBase + DepositPerByte * data.len` taking into + /// account any already reserved funds. + /// + /// - `collection`: The identifier of the collection whose item's metadata to set. + /// - `item`: The identifier of the item whose metadata to set. + /// - `data`: The general information of this item. Limited in length by `StringLimit`. + /// + /// Emits `ItemMetadataSet`. + /// + /// Weight: `O(1)` + #[pallet::call_index(24)] + #[pallet::weight(T::WeightInfo::set_metadata())] + pub fn set_metadata( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + data: BoundedVec, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_set_item_metadata(maybe_check_owner, collection, item, data) + } + + /// Clear the metadata for an item. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// `collection`. + /// + /// Any deposit is freed for the collection's owner. + /// + /// - `collection`: The identifier of the collection whose item's metadata to clear. 
+ /// - `item`: The identifier of the item whose metadata to clear. + /// + /// Emits `ItemMetadataCleared`. + /// + /// Weight: `O(1)` + #[pallet::call_index(25)] + #[pallet::weight(T::WeightInfo::clear_metadata())] + pub fn clear_metadata( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_clear_item_metadata(maybe_check_owner, collection, item) + } + + /// Set the metadata for a collection. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// If the origin is `Signed`, then funds of signer are reserved according to the formula: + /// `MetadataDepositBase + DepositPerByte * data.len` taking into + /// account any already reserved funds. + /// + /// - `collection`: The identifier of the item whose metadata to update. + /// - `data`: The general information of this item. Limited in length by `StringLimit`. + /// + /// Emits `CollectionMetadataSet`. + /// + /// Weight: `O(1)` + #[pallet::call_index(26)] + #[pallet::weight(T::WeightInfo::set_collection_metadata())] + pub fn set_collection_metadata( + origin: OriginFor, + collection: T::CollectionId, + data: BoundedVec, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_set_collection_metadata(maybe_check_owner, collection, data) + } + + /// Clear the metadata for a collection. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// Any deposit is freed for the collection's owner. + /// + /// - `collection`: The identifier of the collection whose metadata to clear. 
+ /// + /// Emits `CollectionMetadataCleared`. + /// + /// Weight: `O(1)` + #[pallet::call_index(27)] + #[pallet::weight(T::WeightInfo::clear_collection_metadata())] + pub fn clear_collection_metadata( + origin: OriginFor, + collection: T::CollectionId, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_clear_collection_metadata(maybe_check_owner, collection) + } + + /// Set (or reset) the acceptance of ownership for a particular account. + /// + /// Origin must be `Signed` and if `maybe_collection` is `Some`, then the signer must have a + /// provider reference. + /// + /// - `maybe_collection`: The identifier of the collection whose ownership the signer is + /// willing to accept, or if `None`, an indication that the signer is willing to accept no + /// ownership transferal. + /// + /// Emits `OwnershipAcceptanceChanged`. + #[pallet::call_index(28)] + #[pallet::weight(T::WeightInfo::set_accept_ownership())] + pub fn set_accept_ownership( + origin: OriginFor, + maybe_collection: Option, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::do_set_accept_ownership(who, maybe_collection) + } + + /// Set the maximum number of items a collection could have. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// - `collection`: The identifier of the collection to change. + /// - `max_supply`: The maximum number of items a collection could have. + /// + /// Emits `CollectionMaxSupplySet` event when successful. 
+ #[pallet::call_index(29)] + #[pallet::weight(T::WeightInfo::set_collection_max_supply())] + pub fn set_collection_max_supply( + origin: OriginFor, + collection: T::CollectionId, + max_supply: u32, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_set_collection_max_supply(maybe_check_owner, collection, max_supply) + } + + /// Update mint settings. + /// + /// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of + /// the `collection`. + /// + /// - `collection`: The identifier of the collection to change. + /// - `mint_settings`: The new mint settings. + /// + /// Emits `CollectionMintSettingsUpdated` event when successful. + #[pallet::call_index(30)] + #[pallet::weight(T::WeightInfo::update_mint_settings())] + pub fn update_mint_settings( + origin: OriginFor, + collection: T::CollectionId, + mint_settings: MintSettings< + BalanceOf, + ::BlockNumber, + T::CollectionId, + >, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + Self::do_update_mint_settings(maybe_check_owner, collection, mint_settings) + } + + /// Set (or reset) the price for an item. + /// + /// Origin must be Signed and must be the owner of the asset `item`. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item to set the price for. + /// - `price`: The price for the item. Pass `None`, to reset the price. + /// - `buyer`: Restricts the buy operation to a specific account. + /// + /// Emits `ItemPriceSet` on success if the price is not `None`. + /// Emits `ItemPriceRemoved` on success if the price is `None`. 
+ #[pallet::call_index(31)] + #[pallet::weight(T::WeightInfo::set_price())] + pub fn set_price( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + price: Option>, + whitelisted_buyer: Option>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let whitelisted_buyer = whitelisted_buyer.map(T::Lookup::lookup).transpose()?; + Self::do_set_price(collection, item, origin, price, whitelisted_buyer) + } + + /// Allows to buy an item if it's up for sale. + /// + /// Origin must be Signed and must not be the owner of the `item`. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item the sender wants to buy. + /// - `bid_price`: The price the sender is willing to pay. + /// + /// Emits `ItemBought` on success. + #[pallet::call_index(32)] + #[pallet::weight(T::WeightInfo::buy_item())] + pub fn buy_item( + origin: OriginFor, + collection: T::CollectionId, + item: T::ItemId, + bid_price: ItemPrice, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_buy_item(collection, item, origin, bid_price) + } + + /// Allows to pay the tips. + /// + /// Origin must be Signed. + /// + /// - `tips`: Tips array. + /// + /// Emits `TipSent` on every tip transfer. + #[pallet::call_index(33)] + #[pallet::weight(T::WeightInfo::pay_tips(tips.len() as u32))] + pub fn pay_tips( + origin: OriginFor, + tips: BoundedVec, T::MaxTips>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_pay_tips(origin, tips) + } + + /// Register a new atomic swap, declaring an intention to send an `item` in exchange for + /// `desired_item` from origin to target on the current blockchain. + /// The target can execute the swap during the specified `duration` of blocks (if set). + /// Additionally, the price could be set for the desired `item`. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item an owner wants to give. 
+ /// - `desired_collection`: The collection of the desired item. + /// - `desired_item`: The desired item an owner wants to receive. + /// - `maybe_price`: The price an owner is willing to pay or receive for the desired `item`. + /// - `duration`: A deadline for the swap. Specified by providing the number of blocks + /// after which the swap will expire. + /// + /// Emits `SwapCreated` on success. + #[pallet::call_index(34)] + #[pallet::weight(T::WeightInfo::create_swap())] + pub fn create_swap( + origin: OriginFor, + offered_collection: T::CollectionId, + offered_item: T::ItemId, + desired_collection: T::CollectionId, + maybe_desired_item: Option, + maybe_price: Option>>, + duration: ::BlockNumber, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_create_swap( + origin, + offered_collection, + offered_item, + desired_collection, + maybe_desired_item, + maybe_price, + duration, + ) + } + + /// Cancel an atomic swap. + /// + /// Origin must be Signed. + /// Origin must be an owner of the `item` if the deadline hasn't expired. + /// + /// - `collection`: The collection of the item. + /// - `item`: The item an owner wants to give. + /// + /// Emits `SwapCancelled` on success. + #[pallet::call_index(35)] + #[pallet::weight(T::WeightInfo::cancel_swap())] + pub fn cancel_swap( + origin: OriginFor, + offered_collection: T::CollectionId, + offered_item: T::ItemId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_cancel_swap(origin, offered_collection, offered_item) + } + + /// Claim an atomic swap. + /// This method executes a pending swap, that was created by a counterpart before. + /// + /// Origin must be Signed and must be an owner of the `item`. + /// + /// - `send_collection`: The collection of the item to be sent. + /// - `send_item`: The item to be sent. + /// - `receive_collection`: The collection of the item to be received. + /// - `receive_item`: The item to be received. 
+ /// - `witness_price`: A price that was previously agreed on. + /// + /// Emits `SwapClaimed` on success. + #[pallet::call_index(36)] + #[pallet::weight(T::WeightInfo::claim_swap())] + pub fn claim_swap( + origin: OriginFor, + send_collection: T::CollectionId, + send_item: T::ItemId, + receive_collection: T::CollectionId, + receive_item: T::ItemId, + witness_price: Option>>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::do_claim_swap( + origin, + send_collection, + send_item, + receive_collection, + receive_item, + witness_price, + ) + } + } +} diff --git a/frame/nfts/src/macros.rs b/frame/nfts/src/macros.rs new file mode 100644 index 0000000000000..07a8f3b9f9556 --- /dev/null +++ b/frame/nfts/src/macros.rs @@ -0,0 +1,74 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +macro_rules! impl_incrementable { + ($($type:ty),+) => { + $( + impl Incrementable for $type { + fn increment(&self) -> Self { + let mut val = self.clone(); + val.saturating_inc(); + val + } + + fn initial_value() -> Self { + 0 + } + } + )+ + }; +} +pub(crate) use impl_incrementable; + +macro_rules! 
impl_codec_bitflags { + ($wrapper:ty, $size:ty, $bitflag_enum:ty) => { + impl MaxEncodedLen for $wrapper { + fn max_encoded_len() -> usize { + <$size>::max_encoded_len() + } + } + impl Encode for $wrapper { + fn using_encoded R>(&self, f: F) -> R { + self.0.bits().using_encoded(f) + } + } + impl EncodeLike for $wrapper {} + impl Decode for $wrapper { + fn decode( + input: &mut I, + ) -> sp_std::result::Result { + let field = <$size>::decode(input)?; + Ok(Self(BitFlags::from_bits(field as $size).map_err(|_| "invalid value")?)) + } + } + + impl TypeInfo for $wrapper { + type Identity = Self; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("BitFlags", module_path!())) + .type_params(vec![TypeParameter::new("T", Some(meta_type::<$bitflag_enum>()))]) + .composite( + Fields::unnamed() + .field(|f| f.ty::<$size>().type_name(stringify!($bitflag_enum))), + ) + } + } + }; +} +pub(crate) use impl_codec_bitflags; diff --git a/frame/nfts/src/mock.rs b/frame/nfts/src/mock.rs new file mode 100644 index 0000000000000..f814b209d5f78 --- /dev/null +++ b/frame/nfts/src/mock.rs @@ -0,0 +1,123 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Nfts pallet. 
+ +use super::*; +use crate as pallet_nfts; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Nfts: pallet_nfts::{Pallet, Call, Storage, Event}, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; +} + +parameter_types! 
{ + pub storage Features: PalletFeatures = PalletFeatures::all_enabled(); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type CollectionId = u32; + type ItemId = u32; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Locker = (); + type CollectionDeposit = ConstU64<2>; + type ItemDeposit = ConstU64<1>; + type MetadataDepositBase = ConstU64<1>; + type AttributeDepositBase = ConstU64<1>; + type DepositPerByte = ConstU64<1>; + type StringLimit = ConstU32<50>; + type KeyLimit = ConstU32<50>; + type ValueLimit = ConstU32<50>; + type ApprovalsLimit = ConstU32<10>; + type ItemAttributesApprovalsLimit = ConstU32<2>; + type MaxTips = ConstU32<10>; + type MaxDeadlineDuration = ConstU64<10000>; + type Features = Features; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/frame/nfts/src/tests.rs b/frame/nfts/src/tests.rs new file mode 100644 index 0000000000000..18a3fd83b4de3 --- /dev/null +++ b/frame/nfts/src/tests.rs @@ -0,0 +1,2484 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Nfts pallet. + +use crate::{mock::*, Event, *}; +use enumflags2::BitFlags; +use frame_support::{ + assert_noop, assert_ok, + dispatch::Dispatchable, + traits::{ + tokens::nonfungibles_v2::{Destroy, Mutate}, + Currency, Get, + }, +}; +use pallet_balances::Error as BalancesError; +use sp_core::bounded::BoundedVec; +use sp_std::prelude::*; + +fn items() -> Vec<(u64, u32, u32)> { + let mut r: Vec<_> = Account::::iter().map(|x| x.0).collect(); + r.sort(); + let mut s: Vec<_> = Item::::iter().map(|x| (x.2.owner, x.0, x.1)).collect(); + s.sort(); + assert_eq!(r, s); + for collection in Item::::iter() + .map(|x| x.0) + .scan(None, |s, item| { + if s.map_or(false, |last| last == item) { + *s = Some(item); + Some(None) + } else { + Some(Some(item)) + } + }) + .flatten() + { + let details = Collection::::get(collection).unwrap(); + let items = Item::::iter_prefix(collection).count() as u32; + assert_eq!(details.items, items); + } + r +} + +fn collections() -> Vec<(u64, u32)> { + let mut r: Vec<_> = CollectionAccount::::iter().map(|x| (x.0, x.1)).collect(); + r.sort(); + let mut s: Vec<_> = Collection::::iter().map(|x| (x.1.owner, x.0)).collect(); + s.sort(); + assert_eq!(r, s); + r +} + +macro_rules! 
bvec { + ($( $x:tt )*) => { + vec![$( $x )*].try_into().unwrap() + } +} + +fn attributes(collection: u32) -> Vec<(Option, AttributeNamespace, Vec, Vec)> { + let mut s: Vec<_> = Attribute::::iter_prefix((collection,)) + .map(|(k, v)| (k.0, k.1, k.2.into(), v.0.into())) + .collect(); + s.sort_by_key(|k: &(Option, AttributeNamespace, Vec, Vec)| k.0); + s.sort_by_key(|k: &(Option, AttributeNamespace, Vec, Vec)| k.2.clone()); + s +} + +fn approvals(collection_id: u32, item_id: u32) -> Vec<(u64, Option)> { + let item = Item::::get(collection_id, item_id).unwrap(); + let s: Vec<_> = item.approvals.into_iter().collect(); + s +} + +fn item_attributes_approvals(collection_id: u32, item_id: u32) -> Vec { + let approvals = ItemAttributesApprovalsOf::::get(collection_id, item_id); + let s: Vec<_> = approvals.into_iter().collect(); + s +} + +fn events() -> Vec> { + let result = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let mock::RuntimeEvent::Nfts(inner) = e { Some(inner) } else { None }) + .collect::>(); + + System::reset_events(); + + result +} + +fn collection_config_from_disabled_settings( + settings: BitFlags, +) -> CollectionConfigFor { + CollectionConfig { + settings: CollectionSettings::from_disabled(settings), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn collection_config_with_all_settings_enabled() -> CollectionConfigFor { + CollectionConfig { + settings: CollectionSettings::all_enabled(), + max_supply: None, + mint_settings: MintSettings::default(), + } +} + +fn default_collection_config() -> CollectionConfigFor { + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()) +} + +fn default_item_config() -> ItemConfig { + ItemConfig { settings: ItemSettings::all_enabled() } +} + +fn item_config_from_disabled_settings(settings: BitFlags) -> ItemConfig { + ItemConfig { settings: ItemSettings::from_disabled(settings) } +} + +#[test] +fn basic_setup_works() { + 
new_test_ext().execute_with(|| { + assert_eq!(items(), vec![]); + }); +} + +#[test] +fn basic_minting_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_eq!(collections(), vec![(1, 0)]); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + assert_eq!(items(), vec![(1, 0, 42)]); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 2, default_collection_config())); + assert_eq!(collections(), vec![(1, 0), (2, 1)]); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 1, 69, 1, None)); + // assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 1, 69, 1, default_item_config())); + assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); + }); +} + +#[test] +fn lifecycle_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + assert_ok!(Nfts::create( + RuntimeOrigin::signed(1), + 1, + collection_config_with_all_settings_enabled() + )); + assert_eq!(Balances::reserved_balance(&1), 2); + assert_eq!(collections(), vec![(1, 0)]); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0, 0])); + assert_eq!(Balances::reserved_balance(&1), 5); + assert!(CollectionMetadataOf::::contains_key(0)); + + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 10, default_item_config())); + assert_eq!(Balances::reserved_balance(&1), 6); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 20, default_item_config())); + assert_eq!(Balances::reserved_balance(&1), 7); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 70, 1, None)); + assert_eq!(items(), vec![(1, 0, 70), (10, 0, 42), (20, 0, 69)]); + assert_eq!(Collection::::get(0).unwrap().items, 3); + assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); + + assert_eq!(Balances::reserved_balance(&2), 0); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 70, 2)); + 
assert_eq!(Balances::reserved_balance(&2), 1); + + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![42, 42])); + assert_eq!(Balances::reserved_balance(&1), 10); + assert!(ItemMetadataOf::::contains_key(0, 42)); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![69, 69])); + assert_eq!(Balances::reserved_balance(&1), 13); + assert!(ItemMetadataOf::::contains_key(0, 69)); + + let w = Nfts::get_destroy_witness(&0).unwrap(); + assert_eq!(w.items, 3); + assert_eq!(w.item_metadatas, 2); + assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert!(!Collection::::contains_key(0)); + assert!(!CollectionConfigOf::::contains_key(0)); + assert!(!Item::::contains_key(0, 42)); + assert!(!Item::::contains_key(0, 69)); + assert!(!CollectionMetadataOf::::contains_key(0)); + assert!(!ItemMetadataOf::::contains_key(0, 42)); + assert!(!ItemMetadataOf::::contains_key(0, 69)); + assert_eq!(collections(), vec![]); + assert_eq!(items(), vec![]); + }); +} + +#[test] +fn destroy_with_bad_witness_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Nfts::create( + RuntimeOrigin::signed(1), + 1, + collection_config_with_all_settings_enabled() + )); + + let w = Collection::::get(0).unwrap().destroy_witness(); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + assert_noop!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); + }); +} + +#[test] +fn mint_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + assert_eq!(Nfts::owner(0, 42).unwrap(), 1); + assert_eq!(collections(), vec![(1, 0)]); + assert_eq!(items(), vec![(1, 0, 42)]); + + // validate minting start and end settings + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 0, 
+ MintSettings { + start_block: Some(2), + end_block: Some(3), + mint_type: MintType::Public, + ..Default::default() + } + )); + + System::set_block_number(1); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 0, 43, 1, None), + Error::::MintNotStarted + ); + System::set_block_number(4); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 0, 43, 1, None), + Error::::MintEnded + ); + + // validate price + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 0, + MintSettings { mint_type: MintType::Public, price: Some(1), ..Default::default() } + )); + Balances::make_free_balance_be(&2, 100); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 43, 2, None)); + assert_eq!(Balances::total_balance(&2), 99); + + // validate types + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 1, + MintSettings { mint_type: MintType::HolderOf(0), ..Default::default() } + )); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(3), 1, 42, 3, None), + Error::::BadWitness + ); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 1, 42, 2, None), + Error::::BadWitness + ); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 1, 42, 2, Some(MintWitness { owner_of_item: 42 })), + Error::::BadWitness + ); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(2), + 1, + 42, + 2, + Some(MintWitness { owner_of_item: 43 }) + )); + + // can't mint twice + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 1, 46, 2, Some(MintWitness { owner_of_item: 43 })), + Error::::AlreadyClaimed + ); + }); +} + +#[test] +fn transfer_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_eq!(items(), vec![(3, 0, 42)]); + 
assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 4), + Error::::NoPermission + ); + + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(3), 0, 42, 2, None)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 4)); + + // validate we can't transfer non-transferable items + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_from_disabled_settings( + CollectionSetting::TransferableItems | CollectionSetting::DepositRequired + ) + )); + + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 1, 1, 42, default_item_config())); + + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(1), collection_id, 42, 3,), + Error::::ItemsNonTransferable + ); + }); +} + +#[test] +fn locking_transfer_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(1), 0, 42)); + assert_noop!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::ItemLocked); + + assert_ok!(Nfts::unlock_item_transfer(RuntimeOrigin::signed(1), 0, 42)); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(1), + 0, + CollectionSettings::from_disabled(CollectionSetting::TransferableItems.into()) + )); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2), + Error::::ItemsNonTransferable + ); + + assert_ok!(Nfts::force_collection_config( + RuntimeOrigin::root(), + 0, + collection_config_with_all_settings_enabled(), + )); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); + }); +} + +#[test] +fn origin_guards_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + + Balances::make_free_balance_be(&2, 100); + 
assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); + assert_noop!( + Nfts::transfer_ownership(RuntimeOrigin::signed(2), 0, 2), + Error::::NoPermission + ); + assert_noop!( + Nfts::set_team(RuntimeOrigin::signed(2), 0, 2, 2, 2), + Error::::NoPermission + ); + assert_noop!( + Nfts::lock_item_transfer(RuntimeOrigin::signed(2), 0, 42), + Error::::NoPermission + ); + assert_noop!( + Nfts::unlock_item_transfer(RuntimeOrigin::signed(2), 0, 42), + Error::::NoPermission + ); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(2), 0, 69, 2, None), + Error::::NoPermission + ); + assert_noop!( + Nfts::burn(RuntimeOrigin::signed(2), 0, 42, None), + Error::::NoPermission + ); + let w = Nfts::get_destroy_witness(&0).unwrap(); + assert_noop!(Nfts::destroy(RuntimeOrigin::signed(2), 0, w), Error::::NoPermission); + }); +} + +#[test] +fn transfer_owner_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 100); + assert_ok!(Nfts::create( + RuntimeOrigin::signed(1), + 1, + collection_config_with_all_settings_enabled() + )); + assert_eq!(collections(), vec![(1, 0)]); + assert_noop!( + Nfts::transfer_ownership(RuntimeOrigin::signed(1), 0, 2), + Error::::Unaccepted + ); + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); + assert_ok!(Nfts::transfer_ownership(RuntimeOrigin::signed(1), 0, 2)); + + assert_eq!(collections(), vec![(2, 0)]); + assert_eq!(Balances::total_balance(&1), 98); + assert_eq!(Balances::total_balance(&2), 102); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&2), 2); + + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(1), Some(0))); + assert_noop!( + Nfts::transfer_ownership(RuntimeOrigin::signed(1), 0, 1), + Error::::NoPermission + ); + + // Mint and set metadata now and make sure that deposit gets transferred back. 
+ assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20])); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + assert_eq!(Balances::reserved_balance(&1), 1); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20])); + assert_ok!(Nfts::set_accept_ownership(RuntimeOrigin::signed(3), Some(0))); + assert_ok!(Nfts::transfer_ownership(RuntimeOrigin::signed(2), 0, 3)); + assert_eq!(collections(), vec![(3, 0)]); + assert_eq!(Balances::total_balance(&2), 58); + assert_eq!(Balances::total_balance(&3), 144); + assert_eq!(Balances::reserved_balance(&2), 0); + assert_eq!(Balances::reserved_balance(&3), 44); + + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&2), 1); + + // 2's acceptance from before is reset when it became an owner, so it cannot be transferred + // without a fresh acceptance. + assert_noop!( + Nfts::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), + Error::::Unaccepted + ); + }); +} + +#[test] +fn set_team_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(2), 0, 42, 2, None)); + assert_ok!(Nfts::lock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); + assert_ok!(Nfts::unlock_item_transfer(RuntimeOrigin::signed(4), 0, 42)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 3)); + assert_ok!(Nfts::burn(RuntimeOrigin::signed(3), 0, 42, None)); + }); +} + +#[test] +fn set_collection_metadata_should_work() { + new_test_ext().execute_with(|| { + // Cannot add metadata to unknown item + assert_noop!( + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20]), + Error::::NoConfig, + ); + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + 
collection_config_with_all_settings_enabled() + )); + // Cannot add metadata to unowned item + assert_noop!( + Nfts::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20]), + Error::::NoPermission, + ); + + // Successfully add metadata and take deposit + Balances::make_free_balance_be(&1, 30); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20])); + assert_eq!(Balances::free_balance(&1), 9); + assert!(CollectionMetadataOf::::contains_key(0)); + + // Force origin works, too. + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 18])); + + // Update deposit + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15])); + assert_eq!(Balances::free_balance(&1), 14); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 25])); + assert_eq!(Balances::free_balance(&1), 4); + + // Cannot over-reserve + assert_noop!( + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 40]), + BalancesError::::InsufficientBalance, + ); + + // Can't set or clear metadata once frozen + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15])); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(1), + 0, + CollectionSettings::from_disabled(CollectionSetting::UnlockedMetadata.into()) + )); + assert_noop!( + Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15]), + Error::::LockedCollectionMetadata, + ); + assert_noop!( + Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 0), + Error::::LockedCollectionMetadata + ); + + // Clear Metadata + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::root(), 0, bvec![0u8; 15])); + assert_noop!( + Nfts::clear_collection_metadata(RuntimeOrigin::signed(2), 0), + Error::::NoPermission + ); + assert_noop!( + Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 1), + Error::::UnknownCollection + ); + assert_noop!( + 
Nfts::clear_collection_metadata(RuntimeOrigin::signed(1), 0), + Error::::LockedCollectionMetadata + ); + assert_ok!(Nfts::clear_collection_metadata(RuntimeOrigin::root(), 0)); + assert!(!CollectionMetadataOf::::contains_key(0)); + }); +} + +#[test] +fn set_item_metadata_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 30); + + // Cannot add metadata to unknown item + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + // Cannot add metadata to unowned item + assert_noop!( + Nfts::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20]), + Error::::NoPermission, + ); + + // Successfully add metadata and take deposit + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 20])); + assert_eq!(Balances::free_balance(&1), 8); + assert!(ItemMetadataOf::::contains_key(0, 42)); + + // Force origin works, too. 
+ assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 18])); + + // Update deposit + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15])); + assert_eq!(Balances::free_balance(&1), 13); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 25])); + assert_eq!(Balances::free_balance(&1), 3); + + // Cannot over-reserve + assert_noop!( + Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 40]), + BalancesError::::InsufficientBalance, + ); + + // Can't set or clear metadata once frozen + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15])); + assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 42, true, false)); + assert_noop!( + Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15]), + Error::::LockedItemMetadata, + ); + assert_noop!( + Nfts::clear_metadata(RuntimeOrigin::signed(1), 0, 42), + Error::::LockedItemMetadata, + ); + + // Clear Metadata + assert_ok!(Nfts::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 15])); + assert_noop!( + Nfts::clear_metadata(RuntimeOrigin::signed(2), 0, 42), + Error::::NoPermission, + ); + assert_noop!( + Nfts::clear_metadata(RuntimeOrigin::signed(1), 1, 42), + Error::::UnknownCollection, + ); + assert_ok!(Nfts::clear_metadata(RuntimeOrigin::root(), 0, 42)); + assert!(!ItemMetadataOf::::contains_key(0, 42)); + }); +} + +#[test] +fn set_collection_owner_attributes_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); + + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + 
bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![1], + bvec![0], + )); + assert_eq!( + attributes(0), + vec![ + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![1], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 10); + assert_eq!(Collection::::get(0).unwrap().owner_deposit, 9); + + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0; 10], + )); + assert_eq!( + attributes(0), + vec![ + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![1], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 19); + assert_eq!(Collection::::get(0).unwrap().owner_deposit, 18); + + assert_ok!(Nfts::clear_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![1], + )); + assert_eq!( + attributes(0), + vec![ + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 16); + + let w = Nfts::get_destroy_witness(&0).unwrap(); + assert_ok!(Nfts::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_eq!(attributes(0), vec![]); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn set_item_owner_attributes_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled() + )); + 
assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 2, default_item_config())); + + // can't set for the collection + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + None, + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + // can't set for the non-owned item + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![2], + bvec![0], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![1], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(2), 9); + + // validate an attribute can be updated + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0; 10], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::ItemOwner, bvec![1], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(2), 18); + + // validate only item's owner (or the root) can remove an attribute + assert_noop!( + Nfts::clear_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + ), + Error::::NoPermission, + ); + assert_ok!(Nfts::clear_attribute( + 
RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]) + ] + ); + assert_eq!(Balances::reserved_balance(2), 15); + + // transfer item + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 0, 3)); + + // validate the attribute are still here & the deposit belongs to the previous owner + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 10]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]) + ] + ); + let key: BoundedVec<_, _> = bvec![0]; + let (_, deposit) = + Attribute::::get((0, Some(0), AttributeNamespace::ItemOwner, &key)).unwrap(); + assert_eq!(deposit.account, Some(2)); + assert_eq!(deposit.amount, 12); + + // on attribute update the deposit should be returned to the previous owner + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(3), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + bvec![0; 11], + )); + let (_, deposit) = + Attribute::::get((0, Some(0), AttributeNamespace::ItemOwner, &key)).unwrap(); + assert_eq!(deposit.account, Some(3)); + assert_eq!(deposit.amount, 13); + assert_eq!(Balances::reserved_balance(2), 3); + assert_eq!(Balances::reserved_balance(3), 13); + + // validate attributes on item deletion + assert_ok!(Nfts::burn(RuntimeOrigin::signed(3), 0, 0, None)); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::ItemOwner, bvec![0], bvec![0; 11]), + (Some(0), AttributeNamespace::ItemOwner, bvec![2], bvec![0]) + ] + ); + assert_ok!(Nfts::clear_attribute( + RuntimeOrigin::signed(3), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![0], + )); + assert_ok!(Nfts::clear_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![2], + )); + assert_eq!(Balances::reserved_balance(2), 0); + 
assert_eq!(Balances::reserved_balance(3), 0); + }); +} + +#[test] +fn set_external_account_attributes_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config())); + assert_ok!(Nfts::approve_item_attributes(RuntimeOrigin::signed(1), 0, 0, 2)); + + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(1), + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(2), + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(2), + bvec![1], + bvec![0], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::Account(2), bvec![0], bvec![0]), + (Some(0), AttributeNamespace::Account(2), bvec![1], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(2), 6); + + // remove permission to set attributes + assert_ok!(Nfts::cancel_item_attributes_approval( + RuntimeOrigin::signed(1), + 0, + 0, + 2, + CancelAttributesApprovalWitness { account_attributes: 2 }, + )); + assert_eq!(attributes(0), vec![]); + assert_eq!(Balances::reserved_balance(2), 0); + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::Account(2), + bvec![0], + bvec![0], + ), + Error::::NoPermission, + ); + }); +} + +#[test] +fn validate_deposit_required_setting() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 100); + + // with the disabled DepositRequired setting, only the collection's owner can set the + 
// attributes for free. + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 2, default_item_config())); + assert_ok!(Nfts::approve_item_attributes(RuntimeOrigin::signed(2), 0, 0, 3)); + + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(2), + 0, + Some(0), + AttributeNamespace::ItemOwner, + bvec![1], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(3), + 0, + Some(0), + AttributeNamespace::Account(3), + bvec![2], + bvec![0], + )); + assert_ok!(::AccountId, ItemConfig>>::set_attribute( + &0, + &0, + &[3], + &[0], + )); + assert_eq!( + attributes(0), + vec![ + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::ItemOwner, bvec![1], bvec![0]), + (Some(0), AttributeNamespace::Account(3), bvec![2], bvec![0]), + (Some(0), AttributeNamespace::Pallet, bvec![3], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::reserved_balance(2), 3); + assert_eq!(Balances::reserved_balance(3), 3); + }); +} + +#[test] +fn set_attribute_should_respect_lock() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled(), + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, None)); + + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_ok!(Nfts::set_attribute( + 
RuntimeOrigin::signed(1), + 0, + Some(1), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + )); + assert_eq!( + attributes(0), + vec![ + (None, AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(0), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + (Some(1), AttributeNamespace::CollectionOwner, bvec![0], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 11); + + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![])); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(1), + 0, + CollectionSettings::from_disabled(CollectionSetting::UnlockedAttributes.into()) + )); + + let e = Error::::LockedCollectionAttributes; + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + ), + e + ); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![1], + )); + + assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 0, false, true)); + let e = Error::::LockedItemAttributes; + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![1], + ), + e + ); + assert_ok!(Nfts::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(1), + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![1], + )); + }); +} + +#[test] +fn preserve_config_for_frozen_items() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 1, 1, None)); + + // if the item is not locked/frozen then the config gets deleted on item burn + assert_ok!(Nfts::burn(RuntimeOrigin::signed(1), 0, 1, Some(1))); + 
assert!(!ItemConfigOf::::contains_key(0, 1)); + + // lock the item and ensure the config stays unchanged + assert_ok!(Nfts::lock_item_properties(RuntimeOrigin::signed(1), 0, 0, true, true)); + + let expect_config = item_config_from_disabled_settings( + ItemSetting::UnlockedAttributes | ItemSetting::UnlockedMetadata, + ); + let config = ItemConfigOf::::get(0, 0).unwrap(); + assert_eq!(config, expect_config); + + assert_ok!(Nfts::burn(RuntimeOrigin::signed(1), 0, 0, Some(1))); + let config = ItemConfigOf::::get(0, 0).unwrap(); + assert_eq!(config, expect_config); + + // can't mint with the different config + assert_noop!( + Nfts::force_mint(RuntimeOrigin::signed(1), 0, 0, 1, default_item_config()), + Error::::InconsistentItemConfig + ); + + assert_ok!(Nfts::update_mint_settings( + RuntimeOrigin::signed(1), + 0, + MintSettings { + default_item_settings: ItemSettings::from_disabled( + ItemSetting::UnlockedAttributes | ItemSetting::UnlockedMetadata + ), + ..Default::default() + } + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 0, 1, None)); + }); +} + +#[test] +fn force_update_collection_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 42, 1, None)); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0; 20])); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20])); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20])); + assert_eq!(Balances::reserved_balance(1), 65); + + // force item status to be free holding + assert_ok!(Nfts::force_collection_config( + RuntimeOrigin::root(), + 0, + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()), + )); + 
assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 0, 142, 1, None)); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 169, 2, default_item_config())); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20])); + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20])); + + Balances::make_free_balance_be(&5, 100); + assert_ok!(Nfts::force_collection_owner(RuntimeOrigin::root(), 0, 5)); + assert_eq!(collections(), vec![(5, 0)]); + assert_eq!(Balances::reserved_balance(1), 2); + assert_eq!(Balances::reserved_balance(5), 63); + + assert_ok!(Nfts::redeposit(RuntimeOrigin::signed(5), 0, bvec![0, 42, 50, 69, 100])); + assert_eq!(Balances::reserved_balance(1), 0); + + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(5), 0, 42, bvec![0; 20])); + assert_eq!(Balances::reserved_balance(5), 42); + + assert_ok!(Nfts::set_metadata(RuntimeOrigin::signed(5), 0, 69, bvec![0; 20])); + assert_eq!(Balances::reserved_balance(5), 21); + + assert_ok!(Nfts::set_collection_metadata(RuntimeOrigin::signed(5), 0, bvec![0; 20])); + assert_eq!(Balances::reserved_balance(5), 0); + + // validate new roles + assert_ok!(Nfts::set_team(RuntimeOrigin::root(), 0, 2, 3, 4)); + assert_eq!( + CollectionRoleOf::::get(0, 2).unwrap(), + CollectionRoles(CollectionRole::Issuer.into()) + ); + assert_eq!( + CollectionRoleOf::::get(0, 3).unwrap(), + CollectionRoles(CollectionRole::Admin.into()) + ); + assert_eq!( + CollectionRoleOf::::get(0, 4).unwrap(), + CollectionRoles(CollectionRole::Freezer.into()) + ); + + assert_ok!(Nfts::set_team(RuntimeOrigin::root(), 0, 3, 2, 3)); + + assert_eq!( + CollectionRoleOf::::get(0, 2).unwrap(), + CollectionRoles(CollectionRole::Admin.into()) + ); + assert_eq!( + CollectionRoleOf::::get(0, 3).unwrap(), + CollectionRoles(CollectionRole::Issuer | CollectionRole::Freezer) + ); + }); +} + +#[test] +fn burn_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Nfts::force_create( + 
RuntimeOrigin::root(), + 1, + collection_config_with_all_settings_enabled() + )); + assert_ok!(Nfts::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); + + assert_noop!( + Nfts::burn(RuntimeOrigin::signed(5), 0, 42, Some(5)), + Error::::UnknownCollection + ); + + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 0, 42, 5, default_item_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(2), 0, 69, 5, default_item_config())); + assert_eq!(Balances::reserved_balance(1), 2); + + assert_noop!( + Nfts::burn(RuntimeOrigin::signed(0), 0, 42, None), + Error::::NoPermission + ); + assert_noop!( + Nfts::burn(RuntimeOrigin::signed(5), 0, 42, Some(6)), + Error::::WrongOwner + ); + + assert_ok!(Nfts::burn(RuntimeOrigin::signed(5), 0, 42, Some(5))); + assert_ok!(Nfts::burn(RuntimeOrigin::signed(3), 0, 69, Some(5))); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn approval_lifecycle_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 3), + Error::::NoPermission + ); + assert!(Item::::get(0, 42).unwrap().approvals.is_empty()); + + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(4), 0, 42, 2, None)); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(2), 0, 42, 2)); + + // ensure we can't buy an item when the collection has a NonTransferableItems flag + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_from_disabled_settings( + CollectionSetting::TransferableItems | CollectionSetting::DepositRequired + ) + )); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(1), 1, collection_id, 1, None)); + + 
assert_noop!( + Nfts::approve_transfer(RuntimeOrigin::signed(1), collection_id, 1, 2, None), + Error::::ItemsNonTransferable + ); + }); +} + +#[test] +fn cancel_approval_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(2), 1, 42, 3), + Error::::UnknownItem + ); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 43, 3), + Error::::UnknownItem + ); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(3), 0, 42, 3), + Error::::NoPermission + ); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 42, 4), + Error::::NotDelegate + ); + + assert_ok!(Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(2), 0, 42, 3), + Error::::NotDelegate + ); + + let current_block = 1; + System::set_block_number(current_block); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 69, 2, default_item_config())); + // approval expires after 2 blocks. + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(5), 0, 42, 3), + Error::::NoPermission + ); + + System::set_block_number(current_block + 3); + // 5 can cancel the approval since the deadline has passed. 
+ assert_ok!(Nfts::cancel_approval(RuntimeOrigin::signed(5), 0, 42, 3)); + assert_eq!(approvals(0, 69), vec![]); + }); +} + +#[test] +fn approving_multiple_accounts_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + let current_block = 1; + System::set_block_number(current_block); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 4, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 5, Some(2))); + assert_eq!(approvals(0, 42), vec![(3, None), (4, None), (5, Some(current_block + 2))]); + + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(4), 0, 42, 6)); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 7), + Error::::NoPermission + ); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(5), 0, 42, 8), + Error::::NoPermission + ); + }); +} + +#[test] +fn approvals_limit_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + for i in 3..13 { + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, i, None)); + } + // the limit is 10 + assert_noop!( + Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 14, None), + Error::::ReachedApprovalLimit + ); + }); +} + +#[test] +fn approval_deadline_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert!(System::block_number().is_zero()); + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + 1, + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()) + )); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + // the 
approval expires after the 2nd block. + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, Some(2))); + + System::set_block_number(3); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4), + Error::::ApprovalExpired + ); + System::set_block_number(1); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); + + assert_eq!(System::block_number(), 1); + // make a new approval with a deadline after 4 blocks, so it will expire after the 5th + // block. + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(4), 0, 42, 6, Some(4))); + // this should still work. + System::set_block_number(5); + assert_ok!(Nfts::transfer(RuntimeOrigin::signed(6), 0, 42, 5)); + }); +} + +#[test] +fn cancel_approval_works_with_admin() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(1), 1, 42, 1), + Error::::UnknownItem + ); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 43, 1), + Error::::UnknownItem + ); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 42, 4), + Error::::NotDelegate + ); + + assert_ok!(Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 42, 3)); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::signed(1), 0, 42, 1), + Error::::NotDelegate + ); + }); +} + +#[test] +fn cancel_approval_works_with_force() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::root(), 1, 42, 1), + 
Error::::UnknownItem + ); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::root(), 0, 43, 1), + Error::::UnknownItem + ); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::root(), 0, 42, 4), + Error::::NotDelegate + ); + + assert_ok!(Nfts::cancel_approval(RuntimeOrigin::root(), 0, 42, 3)); + assert_noop!( + Nfts::cancel_approval(RuntimeOrigin::root(), 0, 42, 1), + Error::::NotDelegate + ); + }); +} + +#[test] +fn clear_all_transfer_approvals_works() { + new_test_ext().execute_with(|| { + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + assert_ok!(Nfts::force_mint(RuntimeOrigin::signed(1), 0, 42, 2, default_item_config())); + + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3, None)); + assert_ok!(Nfts::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 4, None)); + + assert_noop!( + Nfts::clear_all_transfer_approvals(RuntimeOrigin::signed(3), 0, 42), + Error::::NoPermission + ); + + assert_ok!(Nfts::clear_all_transfer_approvals(RuntimeOrigin::signed(2), 0, 42)); + + assert!(events().contains(&Event::::AllApprovalsCancelled { + collection: 0, + item: 42, + owner: 2, + })); + assert_eq!(approvals(0, 42), vec![]); + + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(3), 0, 42, 5), + Error::::NoPermission + ); + assert_noop!( + Nfts::transfer(RuntimeOrigin::signed(4), 0, 42, 5), + Error::::NoPermission + ); + }); +} + +#[test] +fn max_supply_should_work() { + new_test_ext().execute_with(|| { + let collection_id = 0; + let user_id = 1; + let max_supply = 1; + + // validate set_collection_max_supply + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); + assert_eq!(CollectionConfigOf::::get(collection_id).unwrap().max_supply, None); + + assert_ok!(Nfts::set_collection_max_supply( + RuntimeOrigin::signed(user_id), + collection_id, + max_supply + )); + assert_eq!( + CollectionConfigOf::::get(collection_id).unwrap().max_supply, + Some(max_supply) + ); + + 
assert!(events().contains(&Event::::CollectionMaxSupplySet { + collection: collection_id, + max_supply, + })); + + assert_ok!(Nfts::set_collection_max_supply( + RuntimeOrigin::signed(user_id), + collection_id, + max_supply + 1 + )); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(user_id), + collection_id, + CollectionSettings::from_disabled(CollectionSetting::UnlockedMaxSupply.into()) + )); + assert_noop!( + Nfts::set_collection_max_supply( + RuntimeOrigin::signed(user_id), + collection_id, + max_supply + 2 + ), + Error::::MaxSupplyLocked + ); + + // validate we can't mint more to max supply + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 0, user_id, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 1, user_id, None)); + assert_noop!( + Nfts::mint(RuntimeOrigin::signed(user_id), collection_id, 2, user_id, None), + Error::::MaxSupplyReached + ); + }); +} + +#[test] +fn mint_settings_should_work() { + new_test_ext().execute_with(|| { + let collection_id = 0; + let user_id = 1; + let item_id = 0; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_id, + user_id, + None, + )); + assert_eq!( + ItemConfigOf::::get(collection_id, item_id) + .unwrap() + .settings + .get_disabled(), + ItemSettings::all_enabled().get_disabled() + ); + + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_id, + CollectionConfig { + mint_settings: MintSettings { + default_item_settings: ItemSettings::from_disabled( + ItemSetting::Transferable | ItemSetting::UnlockedMetadata + ), + ..Default::default() + }, + ..default_collection_config() + } + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_id, + user_id, + None, + )); + assert_eq!( + ItemConfigOf::::get(collection_id, item_id) + .unwrap() + .settings + .get_disabled(), + 
ItemSettings::from_disabled(ItemSetting::Transferable | ItemSetting::UnlockedMetadata) + .get_disabled() + ); + }); +} + +#[test] +fn set_price_should_work() { + new_test_ext().execute_with(|| { + let user_id = 1; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + None, + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + user_id, + None, + )); + + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + Some(1), + None, + )); + + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + Some(2), + Some(3) + )); + + let item = ItemPriceOf::::get(collection_id, item_1).unwrap(); + assert_eq!(item.0, 1); + assert_eq!(item.1, None); + + let item = ItemPriceOf::::get(collection_id, item_2).unwrap(); + assert_eq!(item.0, 2); + assert_eq!(item.1, Some(3)); + + assert!(events().contains(&Event::::ItemPriceSet { + collection: collection_id, + item: item_1, + price: 1, + whitelisted_buyer: None, + })); + + // validate we can unset the price + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + None, + None + )); + assert!(events().contains(&Event::::ItemPriceRemoved { + collection: collection_id, + item: item_2 + })); + assert!(!ItemPriceOf::::contains_key(collection_id, item_2)); + + // ensure we can't set price when the items are non-transferable + let collection_id = 1; + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_id, + collection_config_from_disabled_settings( + CollectionSetting::TransferableItems | CollectionSetting::DepositRequired + ) + )); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + None, + )); + + assert_noop!( + 
Nfts::set_price(RuntimeOrigin::signed(user_id), collection_id, item_1, Some(2), None), + Error::::ItemsNonTransferable + ); + }); +} + +#[test] +fn buy_item_should_work() { + new_test_ext().execute_with(|| { + let user_1 = 1; + let user_2 = 2; + let user_3 = 3; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + let item_3 = 3; + let price_1 = 20; + let price_2 = 30; + let initial_balance = 100; + + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + Balances::make_free_balance_be(&user_3, initial_balance); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); + + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_1, None)); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_1, None)); + + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_1), + collection_id, + item_1, + Some(price_1), + None, + )); + + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_1), + collection_id, + item_2, + Some(price_2), + Some(user_3), + )); + + // can't buy for less + assert_noop!( + Nfts::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_1, 1), + Error::::BidTooLow + ); + + // pass the higher price to validate it will still deduct correctly + assert_ok!(Nfts::buy_item( + RuntimeOrigin::signed(user_2), + collection_id, + item_1, + price_1 + 1, + )); + + // validate the new owner & balances + let item = Item::::get(collection_id, item_1).unwrap(); + assert_eq!(item.owner, user_2); + assert_eq!(Balances::total_balance(&user_1), initial_balance + price_1); + assert_eq!(Balances::total_balance(&user_2), initial_balance - price_1); + + // can't buy from yourself + assert_noop!( + Nfts::buy_item(RuntimeOrigin::signed(user_1), collection_id, item_2, price_2), + Error::::NoPermission + ); + + // 
can't buy when the item is listed for a specific buyer + assert_noop!( + Nfts::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_2, price_2), + Error::::NoPermission + ); + + // can buy when I'm a whitelisted buyer + assert_ok!(Nfts::buy_item(RuntimeOrigin::signed(user_3), collection_id, item_2, price_2)); + + assert!(events().contains(&Event::::ItemBought { + collection: collection_id, + item: item_2, + price: price_2, + seller: user_1, + buyer: user_3, + })); + + // ensure we reset the buyer field + assert!(!ItemPriceOf::::contains_key(collection_id, item_2)); + + // can't buy when item is not for sale + assert_noop!( + Nfts::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_3, price_2), + Error::::NotForSale + ); + + // ensure we can't buy an item when the collection or an item are frozen + { + assert_ok!(Nfts::set_price( + RuntimeOrigin::signed(user_1), + collection_id, + item_3, + Some(price_1), + None, + )); + + // lock the collection + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(user_1), + collection_id, + CollectionSettings::from_disabled(CollectionSetting::TransferableItems.into()) + )); + + let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { + collection: collection_id, + item: item_3, + bid_price: price_1, + }); + assert_noop!( + buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), + Error::::ItemsNonTransferable + ); + + // unlock the collection + assert_ok!(Nfts::force_collection_config( + RuntimeOrigin::root(), + collection_id, + collection_config_with_all_settings_enabled(), + )); + + // lock the transfer + assert_ok!(Nfts::lock_item_transfer( + RuntimeOrigin::signed(user_1), + collection_id, + item_3, + )); + + let buy_item_call = mock::RuntimeCall::Nfts(crate::Call::::buy_item { + collection: collection_id, + item: item_3, + bid_price: price_1, + }); + assert_noop!( + buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), + Error::::ItemLocked + ); + } + }); +} + +#[test] +fn 
pay_tips_should_work() { + new_test_ext().execute_with(|| { + let user_1 = 1; + let user_2 = 2; + let user_3 = 3; + let collection_id = 0; + let item_id = 1; + let tip = 2; + let initial_balance = 100; + + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + Balances::make_free_balance_be(&user_3, initial_balance); + + assert_ok!(Nfts::pay_tips( + RuntimeOrigin::signed(user_1), + bvec![ + ItemTip { collection: collection_id, item: item_id, receiver: user_2, amount: tip }, + ItemTip { collection: collection_id, item: item_id, receiver: user_3, amount: tip }, + ] + )); + + assert_eq!(Balances::total_balance(&user_1), initial_balance - tip * 2); + assert_eq!(Balances::total_balance(&user_2), initial_balance + tip); + assert_eq!(Balances::total_balance(&user_3), initial_balance + tip); + + let events = events(); + assert!(events.contains(&Event::::TipSent { + collection: collection_id, + item: item_id, + sender: user_1, + receiver: user_2, + amount: tip, + })); + assert!(events.contains(&Event::::TipSent { + collection: collection_id, + item: item_id, + sender: user_1, + receiver: user_3, + amount: tip, + })); + }); +} + +#[test] +fn create_cancel_swap_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let user_id = 1; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + let price = 1; + let price_direction = PriceDirection::Receive; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + let duration = 2; + let expect_deadline = 3; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + user_id, + None, + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + user_id, + None, + )); + + // validate desired item and the collection exists + assert_noop!( + 
Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2 + 1), + Some(price_with_direction.clone()), + duration, + ), + Error::::UnknownItem + ); + assert_noop!( + Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id + 1, + None, + Some(price_with_direction.clone()), + duration, + ), + Error::::UnknownCollection + ); + + let max_duration: u64 = ::MaxDeadlineDuration::get(); + assert_noop!( + Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + max_duration.saturating_add(1), + ), + Error::::WrongDuration + ); + + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + duration, + )); + + let swap = PendingSwapOf::::get(collection_id, item_1).unwrap(); + assert_eq!(swap.desired_collection, collection_id); + assert_eq!(swap.desired_item, Some(item_2)); + assert_eq!(swap.price, Some(price_with_direction.clone())); + assert_eq!(swap.deadline, expect_deadline); + + assert!(events().contains(&Event::::SwapCreated { + offered_collection: collection_id, + offered_item: item_1, + desired_collection: collection_id, + desired_item: Some(item_2), + price: Some(price_with_direction.clone()), + deadline: expect_deadline, + })); + + // validate we can cancel the swap + assert_ok!(Nfts::cancel_swap(RuntimeOrigin::signed(user_id), collection_id, item_1)); + assert!(events().contains(&Event::::SwapCancelled { + offered_collection: collection_id, + offered_item: item_1, + desired_collection: collection_id, + desired_item: Some(item_2), + price: Some(price_with_direction.clone()), + deadline: expect_deadline, + })); + assert!(!PendingSwapOf::::contains_key(collection_id, item_1)); + + // validate anyone can cancel the expired swap + assert_ok!(Nfts::create_swap( + 
RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + duration, + )); + assert_noop!( + Nfts::cancel_swap(RuntimeOrigin::signed(user_id + 1), collection_id, item_1), + Error::::NoPermission + ); + System::set_block_number(expect_deadline + 1); + assert_ok!(Nfts::cancel_swap(RuntimeOrigin::signed(user_id + 1), collection_id, item_1)); + + // validate optional desired_item param + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_id), + collection_id, + item_1, + collection_id, + None, + Some(price_with_direction), + duration, + )); + + let swap = PendingSwapOf::::get(collection_id, item_1).unwrap(); + assert_eq!(swap.desired_item, None); + }); +} + +#[test] +fn claim_swap_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let user_1 = 1; + let user_2 = 2; + let collection_id = 0; + let item_1 = 1; + let item_2 = 2; + let item_3 = 3; + let item_4 = 4; + let item_5 = 5; + let price = 100; + let price_direction = PriceDirection::Receive; + let price_with_direction = + PriceWithDirection { amount: price, direction: price_direction.clone() }; + let duration = 2; + let initial_balance = 1000; + let deadline = 1 + duration; + + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_1,user_1, + None, + )); + assert_ok!(Nfts::force_mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_2, + user_2, + default_item_config(), + )); + assert_ok!(Nfts::force_mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_3, + user_2, + default_item_config(), + )); + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_1), + collection_id, + item_4, + user_1, + None, + )); + assert_ok!(Nfts::force_mint( + 
RuntimeOrigin::signed(user_1), + collection_id, + item_5, + user_2, + default_item_config(), + )); + + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_1), + collection_id, + item_1, + collection_id, + Some(item_2), + Some(price_with_direction.clone()), + duration, + )); + + // validate the deadline + System::set_block_number(5); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(price_with_direction.clone()), + ), + Error::::DeadlineExpired + ); + System::set_block_number(1); + + // validate edge cases + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_4, // no swap was created for that asset + Some(price_with_direction.clone()), + ), + Error::::UnknownSwap + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_4, // not my item + collection_id, + item_1, + Some(price_with_direction.clone()), + ), + Error::::NoPermission + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_5, // my item, but not the one another part wants + collection_id, + item_1, + Some(price_with_direction.clone()), + ), + Error::::UnknownSwap + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(PriceWithDirection { amount: price + 1, direction: price_direction.clone() }), // wrong price + ), + Error::::UnknownSwap + ); + assert_noop!( + Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(PriceWithDirection { amount: price, direction: PriceDirection::Send }), // wrong direction + ), + Error::::UnknownSwap + ); + + assert_ok!(Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_2, + collection_id, + item_1, + Some(price_with_direction.clone()), + )); + + // validate the new owner + let item = 
Item::::get(collection_id, item_1).unwrap(); + assert_eq!(item.owner, user_2); + let item = Item::::get(collection_id, item_2).unwrap(); + assert_eq!(item.owner, user_1); + + // validate the balances + assert_eq!(Balances::total_balance(&user_1), initial_balance + price); + assert_eq!(Balances::total_balance(&user_2), initial_balance - price); + + // ensure we reset the swap + assert!(!PendingSwapOf::::contains_key(collection_id, item_1)); + + // validate the event + assert!(events().contains(&Event::::SwapClaimed { + sent_collection: collection_id, + sent_item: item_2, + sent_item_owner: user_2, + received_collection: collection_id, + received_item: item_1, + received_item_owner: user_1, + price: Some(price_with_direction.clone()), + deadline, + })); + + // validate the optional desired_item param and another price direction + let price_direction = PriceDirection::Send; + let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; + Balances::make_free_balance_be(&user_1, initial_balance); + Balances::make_free_balance_be(&user_2, initial_balance); + + assert_ok!(Nfts::create_swap( + RuntimeOrigin::signed(user_1), + collection_id, + item_4, + collection_id, + None, + Some(price_with_direction.clone()), + duration, + )); + assert_ok!(Nfts::claim_swap( + RuntimeOrigin::signed(user_2), + collection_id, + item_1, + collection_id, + item_4, + Some(price_with_direction), + )); + let item = Item::::get(collection_id, item_1).unwrap(); + assert_eq!(item.owner, user_1); + let item = Item::::get(collection_id, item_4).unwrap(); + assert_eq!(item.owner, user_2); + + assert_eq!(Balances::total_balance(&user_1), initial_balance - price); + assert_eq!(Balances::total_balance(&user_2), initial_balance + price); + }); +} + +#[test] +fn various_collection_settings() { + new_test_ext().execute_with(|| { + // when we set only one value it's required to call .into() on it + let config = + 
collection_config_from_disabled_settings(CollectionSetting::TransferableItems.into()); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, config)); + + let config = CollectionConfigOf::::get(0).unwrap(); + assert!(!config.is_setting_enabled(CollectionSetting::TransferableItems)); + assert!(config.is_setting_enabled(CollectionSetting::UnlockedMetadata)); + + // no need to call .into() for multiple values + let config = collection_config_from_disabled_settings( + CollectionSetting::UnlockedMetadata | CollectionSetting::TransferableItems, + ); + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, config)); + + let config = CollectionConfigOf::::get(1).unwrap(); + assert!(!config.is_setting_enabled(CollectionSetting::TransferableItems)); + assert!(!config.is_setting_enabled(CollectionSetting::UnlockedMetadata)); + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), 1, default_collection_config())); + }); +} + +#[test] +fn collection_locking_should_work() { + new_test_ext().execute_with(|| { + let user_id = 1; + let collection_id = 0; + + assert_ok!(Nfts::force_create( + RuntimeOrigin::root(), + user_id, + collection_config_with_all_settings_enabled() + )); + + let lock_config = + collection_config_from_disabled_settings(CollectionSetting::DepositRequired.into()); + assert_noop!( + Nfts::lock_collection( + RuntimeOrigin::signed(user_id), + collection_id, + lock_config.settings, + ), + Error::::WrongSetting + ); + + // validate partial lock + let lock_config = collection_config_from_disabled_settings( + CollectionSetting::TransferableItems | CollectionSetting::UnlockedAttributes, + ); + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(user_id), + collection_id, + lock_config.settings, + )); + + let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); + assert_eq!(stored_config, lock_config); + + // validate full lock + assert_ok!(Nfts::lock_collection( + RuntimeOrigin::signed(user_id), + collection_id, + 
CollectionSettings::from_disabled(CollectionSetting::UnlockedMetadata.into()), + )); + + let stored_config = CollectionConfigOf::::get(collection_id).unwrap(); + let full_lock_config = collection_config_from_disabled_settings( + CollectionSetting::TransferableItems | + CollectionSetting::UnlockedMetadata | + CollectionSetting::UnlockedAttributes, + ); + assert_eq!(stored_config, full_lock_config); + }); +} + +#[test] +fn pallet_level_feature_flags_should_work() { + new_test_ext().execute_with(|| { + Features::set(&PalletFeatures::from_disabled( + PalletFeature::Trading | PalletFeature::Approvals | PalletFeature::Attributes, + )); + + let user_id = 1; + let collection_id = 0; + let item_id = 1; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_id, default_collection_config())); + + assert_ok!(Nfts::mint( + RuntimeOrigin::signed(user_id), + collection_id, + item_id, + user_id, + None, + )); + + // PalletFeature::Trading + assert_noop!( + Nfts::set_price(RuntimeOrigin::signed(user_id), collection_id, item_id, Some(1), None), + Error::::MethodDisabled + ); + assert_noop!( + Nfts::buy_item(RuntimeOrigin::signed(user_id), collection_id, item_id, 1), + Error::::MethodDisabled + ); + + // PalletFeature::Approvals + assert_noop!( + Nfts::approve_transfer(RuntimeOrigin::signed(user_id), collection_id, item_id, 2, None), + Error::::MethodDisabled + ); + + // PalletFeature::Attributes + assert_noop!( + Nfts::set_attribute( + RuntimeOrigin::signed(user_id), + collection_id, + None, + AttributeNamespace::CollectionOwner, + bvec![0], + bvec![0], + ), + Error::::MethodDisabled + ); + }) +} + +#[test] +fn group_roles_by_account_should_work() { + new_test_ext().execute_with(|| { + assert_eq!(Nfts::group_roles_by_account(vec![]), vec![]); + + let account_to_role = Nfts::group_roles_by_account(vec![ + (3, CollectionRole::Freezer), + (1, CollectionRole::Issuer), + (2, CollectionRole::Admin), + ]); + let expect = vec![ + (1, CollectionRoles(CollectionRole::Issuer.into())), + 
(2, CollectionRoles(CollectionRole::Admin.into())), + (3, CollectionRoles(CollectionRole::Freezer.into())), + ]; + assert_eq!(account_to_role, expect); + + let account_to_role = Nfts::group_roles_by_account(vec![ + (3, CollectionRole::Freezer), + (2, CollectionRole::Issuer), + (2, CollectionRole::Admin), + ]); + let expect = vec![ + (2, CollectionRoles(CollectionRole::Issuer | CollectionRole::Admin)), + (3, CollectionRoles(CollectionRole::Freezer.into())), + ]; + assert_eq!(account_to_role, expect); + }) +} + +#[test] +fn add_remove_item_attributes_approval_should_work() { + new_test_ext().execute_with(|| { + let user_1 = 1; + let user_2 = 2; + let user_3 = 3; + let user_4 = 4; + let collection_id = 0; + let item_id = 0; + + assert_ok!(Nfts::force_create(RuntimeOrigin::root(), user_1, default_collection_config())); + assert_ok!(Nfts::mint(RuntimeOrigin::signed(user_1), collection_id, item_id, user_1, None)); + assert_ok!(Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_2, + )); + assert_eq!(item_attributes_approvals(collection_id, item_id), vec![user_2]); + + assert_ok!(Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_3, + )); + assert_ok!(Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_2, + )); + assert_eq!(item_attributes_approvals(collection_id, item_id), vec![user_2, user_3]); + + assert_noop!( + Nfts::approve_item_attributes( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_4, + ), + Error::::ReachedApprovalLimit + ); + + assert_ok!(Nfts::cancel_item_attributes_approval( + RuntimeOrigin::signed(user_1), + collection_id, + item_id, + user_2, + CancelAttributesApprovalWitness { account_attributes: 1 }, + )); + assert_eq!(item_attributes_approvals(collection_id, item_id), vec![user_3]); + }) +} diff --git a/frame/nfts/src/types.rs b/frame/nfts/src/types.rs new file mode 100644 index 
0000000000000..58b1acaaedf42 --- /dev/null +++ b/frame/nfts/src/types.rs @@ -0,0 +1,465 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various basic types for use in the Nfts pallet. + +use super::*; +use crate::macros::*; +use codec::EncodeLike; +use enumflags2::{bitflags, BitFlags}; +use frame_support::{ + pallet_prelude::{BoundedVec, MaxEncodedLen}, + traits::Get, + BoundedBTreeMap, BoundedBTreeSet, +}; +use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; + +pub(super) type DepositBalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +pub(super) type CollectionDetailsFor = + CollectionDetails<::AccountId, DepositBalanceOf>; +pub(super) type ApprovalsOf = BoundedBTreeMap< + ::AccountId, + Option<::BlockNumber>, + >::ApprovalsLimit, +>; +pub(super) type ItemAttributesApprovals = + BoundedBTreeSet<::AccountId, >::ItemAttributesApprovalsLimit>; +pub(super) type ItemDepositOf = + ItemDeposit, ::AccountId>; +pub(super) type AttributeDepositOf = + AttributeDeposit, ::AccountId>; +pub(super) type ItemDetailsFor = + ItemDetails<::AccountId, ItemDepositOf, ApprovalsOf>; +pub(super) type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +pub(super) type ItemPrice = BalanceOf; +pub(super) type ItemTipOf = ItemTip< + >::CollectionId, + >::ItemId, + ::AccountId, + BalanceOf, 
+>; +pub(super) type CollectionConfigFor = CollectionConfig< + BalanceOf, + ::BlockNumber, + >::CollectionId, +>; + +pub trait Incrementable { + fn increment(&self) -> Self; + fn initial_value() -> Self; +} +impl_incrementable!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); + +/// Information about a collection. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct CollectionDetails { + /// Collection's owner. + pub(super) owner: AccountId, + /// The total balance deposited by the owner for all the storage data associated with this + /// collection. Used by `destroy`. + pub(super) owner_deposit: DepositBalance, + /// The total number of outstanding items of this collection. + pub(super) items: u32, + /// The total number of outstanding item metadata of this collection. + pub(super) item_metadatas: u32, + /// The total number of attributes for this collection. + pub(super) attributes: u32, +} + +/// Witness data for the destroy transactions. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct DestroyWitness { + /// The total number of outstanding items of this collection. + #[codec(compact)] + pub items: u32, + /// The total number of items in this collection that have outstanding item metadata. + #[codec(compact)] + pub item_metadatas: u32, + /// The total number of attributes for this collection. + #[codec(compact)] + pub attributes: u32, +} + +impl CollectionDetails { + pub fn destroy_witness(&self) -> DestroyWitness { + DestroyWitness { + items: self.items, + item_metadatas: self.item_metadatas, + attributes: self.attributes, + } + } +} + +/// Witness data for items mint transactions. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct MintWitness { + /// Provide the id of the item in a required collection. + pub owner_of_item: ItemId, +} + +/// Information concerning the ownership of a single unique item. 
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +pub struct ItemDetails { + /// The owner of this item. + pub(super) owner: AccountId, + /// The approved transferrer of this item, if one is set. + pub(super) approvals: Approvals, + /// The amount held in the pallet's default account for this item. Free-hold items will have + /// this as zero. + pub(super) deposit: Deposit, +} + +/// Information about the reserved item deposit. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct ItemDeposit { + /// A depositor account. + pub(super) account: AccountId, + /// An amount that gets reserved. + pub(super) amount: DepositBalance, +} + +/// Information about the collection's metadata. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(StringLimit))] +#[codec(mel_bound(DepositBalance: MaxEncodedLen))] +pub struct CollectionMetadata> { + /// The balance deposited for this metadata. + /// + /// This pays for the data stored in this struct. + pub(super) deposit: DepositBalance, + /// General information concerning this collection. Limited in length by `StringLimit`. This + /// will generally be either a JSON dump or the hash of some JSON which can be found on a + /// hash-addressable global publication system such as IPFS. + pub(super) data: BoundedVec, +} + +/// Information about the item's metadata. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(StringLimit))] +#[codec(mel_bound(DepositBalance: MaxEncodedLen))] +pub struct ItemMetadata> { + /// The balance deposited for this metadata. + /// + /// This pays for the data stored in this struct. + pub(super) deposit: DepositBalance, + /// General information concerning this item. Limited in length by `StringLimit`. 
This will + /// generally be either a JSON dump or the hash of some JSON which can be found on a + /// hash-addressable global publication system such as IPFS. + pub(super) data: BoundedVec, +} + +/// Information about the tip. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct ItemTip { + /// The collection of the item. + pub(super) collection: CollectionId, + /// An item of which the tip is sent for. + pub(super) item: ItemId, + /// A sender of the tip. + pub(super) receiver: AccountId, + /// An amount the sender is willing to tip. + pub(super) amount: Amount, +} + +/// Information about the pending swap. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] +pub struct PendingSwap { + /// The collection that contains the item that the user wants to receive. + pub(super) desired_collection: CollectionId, + /// The item the user wants to receive. + pub(super) desired_item: Option, + /// A price for the desired `item` with the direction. + pub(super) price: Option, + /// An optional deadline for the swap. + pub(super) deadline: Deadline, +} + +/// Information about the reserved attribute deposit. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct AttributeDeposit { + /// A depositor account. + pub(super) account: Option, + /// An amount that gets reserved. + pub(super) amount: DepositBalance, +} + +/// Specifies whether the tokens will be sent or received. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum PriceDirection { + /// Tokens will be sent. + Send, + /// Tokens will be received. + Receive, +} + +/// Holds the details about the price. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct PriceWithDirection { + /// An amount. + pub(super) amount: Amount, + /// A direction (send or receive). 
+ pub(super) direction: PriceDirection, +} + +/// Support for up to 64 user-enabled features on a collection. +#[bitflags] +#[repr(u64)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum CollectionSetting { + /// Items in this collection are transferable. + TransferableItems, + /// The metadata of this collection can be modified. + UnlockedMetadata, + /// Attributes of this collection can be modified. + UnlockedAttributes, + /// The supply of this collection can be modified. + UnlockedMaxSupply, + /// When this isn't set then the deposit is required to hold the items of this collection. + DepositRequired, +} + +/// Wrapper type for `BitFlags` that implements `Codec`. +#[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] +pub struct CollectionSettings(pub BitFlags); + +impl CollectionSettings { + pub fn all_enabled() -> Self { + Self(BitFlags::EMPTY) + } + pub fn get_disabled(&self) -> BitFlags { + self.0 + } + pub fn is_disabled(&self, setting: CollectionSetting) -> bool { + self.0.contains(setting) + } + pub fn from_disabled(settings: BitFlags) -> Self { + Self(settings) + } +} + +impl_codec_bitflags!(CollectionSettings, u64, CollectionSetting); + +/// Mint type. Can the NFT be create by anyone, or only the creator of the collection, +/// or only by wallets that already hold an NFT from a certain collection? +/// The ownership of a privately minted NFT is still publicly visible. +#[derive(Clone, Copy, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum MintType { + /// Only an `Issuer` could mint items. + Issuer, + /// Anyone could mint items. + Public, + /// Only holders of items in specified collection could mint new items. + HolderOf(CollectionId), +} + +/// Holds the information about minting. 
+#[derive(Clone, Copy, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct MintSettings { + /// Whether anyone can mint or if minters are restricted to some subset. + pub(super) mint_type: MintType, + /// An optional price per mint. + pub(super) price: Option, + /// When the mint starts. + pub(super) start_block: Option, + /// When the mint ends. + pub(super) end_block: Option, + /// Default settings each item will get during the mint. + pub(super) default_item_settings: ItemSettings, +} + +impl Default for MintSettings { + fn default() -> Self { + Self { + mint_type: MintType::Issuer, + price: None, + start_block: None, + end_block: None, + default_item_settings: ItemSettings::all_enabled(), + } + } +} + +/// A witness data to cancel attributes approval operation. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct CancelAttributesApprovalWitness { + /// An amount of attributes previously created by account. + pub account_attributes: u32, +} + +/// A list of possible pallet-level attributes. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum PalletAttributes { + /// Marks an item as being used in order to claim another item. + UsedToClaim(CollectionId), +} + +/// Collection's configuration. +#[derive( + Clone, Copy, Decode, Default, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo, +)] +pub struct CollectionConfig { + /// Collection's settings. + pub(super) settings: CollectionSettings, + /// Collection's max supply. + pub(super) max_supply: Option, + /// Default settings each item will get during the mint. 
+ pub(super) mint_settings: MintSettings, +} + +impl CollectionConfig { + pub fn is_setting_enabled(&self, setting: CollectionSetting) -> bool { + !self.settings.is_disabled(setting) + } + pub fn has_disabled_setting(&self, setting: CollectionSetting) -> bool { + self.settings.is_disabled(setting) + } + pub fn enable_setting(&mut self, setting: CollectionSetting) { + self.settings.0.remove(setting); + } + pub fn disable_setting(&mut self, setting: CollectionSetting) { + self.settings.0.insert(setting); + } +} + +/// Support for up to 64 user-enabled features on an item. +#[bitflags] +#[repr(u64)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum ItemSetting { + /// This item is transferable. + Transferable, + /// The metadata of this item can be modified. + UnlockedMetadata, + /// Attributes of this item can be modified. + UnlockedAttributes, +} + +/// Wrapper type for `BitFlags` that implements `Codec`. +#[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] +pub struct ItemSettings(pub BitFlags); + +impl ItemSettings { + pub fn all_enabled() -> Self { + Self(BitFlags::EMPTY) + } + pub fn get_disabled(&self) -> BitFlags { + self.0 + } + pub fn is_disabled(&self, setting: ItemSetting) -> bool { + self.0.contains(setting) + } + pub fn from_disabled(settings: BitFlags) -> Self { + Self(settings) + } +} + +impl_codec_bitflags!(ItemSettings, u64, ItemSetting); + +/// Item's configuration. +#[derive( + Encode, Decode, Default, PartialEq, RuntimeDebug, Clone, Copy, MaxEncodedLen, TypeInfo, +)] +pub struct ItemConfig { + /// Item's settings. 
+ pub(super) settings: ItemSettings, +} + +impl ItemConfig { + pub fn is_setting_enabled(&self, setting: ItemSetting) -> bool { + !self.settings.is_disabled(setting) + } + pub fn has_disabled_setting(&self, setting: ItemSetting) -> bool { + self.settings.is_disabled(setting) + } + pub fn has_disabled_settings(&self) -> bool { + !self.settings.get_disabled().is_empty() + } + pub fn enable_setting(&mut self, setting: ItemSetting) { + self.settings.0.remove(setting); + } + pub fn disable_setting(&mut self, setting: ItemSetting) { + self.settings.0.insert(setting); + } +} + +/// Support for up to 64 system-enabled features on a collection. +#[bitflags] +#[repr(u64)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum PalletFeature { + /// Enable/disable trading operations. + Trading, + /// Allow/disallow setting attributes. + Attributes, + /// Allow/disallow transfer approvals. + Approvals, + /// Allow/disallow atomic items swap. + Swaps, +} + +/// Wrapper type for `BitFlags` that implements `Codec`. +#[derive(Default, RuntimeDebug)] +pub struct PalletFeatures(pub BitFlags); + +impl PalletFeatures { + pub fn all_enabled() -> Self { + Self(BitFlags::EMPTY) + } + pub fn from_disabled(features: BitFlags) -> Self { + Self(features) + } + pub fn is_enabled(&self, feature: PalletFeature) -> bool { + !self.0.contains(feature) + } +} +impl_codec_bitflags!(PalletFeatures, u64, PalletFeature); + +/// Support for up to 8 different roles for collections. +#[bitflags] +#[repr(u8)] +#[derive(Copy, Clone, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub enum CollectionRole { + /// Can mint items. + Issuer, + /// Can freeze items. + Freezer, + /// Can thaw items, force transfers and burn items from any account. + Admin, +} + +/// A wrapper type that implements `Codec`. 
+#[derive(Clone, Copy, PartialEq, Eq, Default, RuntimeDebug)] +pub struct CollectionRoles(pub BitFlags); + +impl CollectionRoles { + pub fn none() -> Self { + Self(BitFlags::EMPTY) + } + pub fn has_role(&self, role: CollectionRole) -> bool { + self.0.contains(role) + } + pub fn add_role(&mut self, role: CollectionRole) { + self.0.insert(role); + } + pub fn max_roles() -> u8 { + let all: BitFlags = BitFlags::all(); + all.len() as u8 + } +} +impl_codec_bitflags!(CollectionRoles, u8, CollectionRole); diff --git a/frame/nfts/src/weights.rs b/frame/nfts/src/weights.rs new file mode 100644 index 0000000000000..f05f8ca514c3e --- /dev/null +++ b/frame/nfts/src/weights.rs @@ -0,0 +1,851 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_nfts +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-12-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// /home/benchbot/cargo_target_dir/production/substrate +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_nfts +// --chain=dev +// --header=./HEADER-APACHE2 +// --output=./frame/nfts/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_nfts. +pub trait WeightInfo { + fn create() -> Weight; + fn force_create() -> Weight; + fn destroy(n: u32, m: u32, a: u32, ) -> Weight; + fn mint() -> Weight; + fn force_mint() -> Weight; + fn burn() -> Weight; + fn transfer() -> Weight; + fn redeposit(i: u32, ) -> Weight; + fn lock_item_transfer() -> Weight; + fn unlock_item_transfer() -> Weight; + fn lock_collection() -> Weight; + fn transfer_ownership() -> Weight; + fn set_team() -> Weight; + fn force_collection_owner() -> Weight; + fn force_collection_config() -> Weight; + fn lock_item_properties() -> Weight; + fn set_attribute() -> Weight; + fn force_set_attribute() -> Weight; + fn clear_attribute() -> Weight; + fn approve_item_attributes() -> Weight; + fn cancel_item_attributes_approval(n: u32, ) -> Weight; + fn set_metadata() -> Weight; + fn clear_metadata() -> Weight; + fn set_collection_metadata() -> Weight; + fn clear_collection_metadata() -> Weight; + fn approve_transfer() -> Weight; + fn cancel_approval() -> Weight; + fn clear_all_transfer_approvals() -> Weight; + fn set_accept_ownership() -> Weight; + fn set_collection_max_supply() -> Weight; + fn update_mint_settings() -> Weight; + 
fn set_price() -> Weight; + fn buy_item() -> Weight; + fn pay_tips(n: u32, ) -> Weight; + fn create_swap() -> Weight; + fn cancel_swap() -> Weight; + fn claim_swap() -> Weight; +} + +/// Weights for pallet_nfts using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Nfts NextCollectionId (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) + fn create() -> Weight { + // Minimum execution time: 44_312 nanoseconds. + Weight::from_ref_time(44_871_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } + // Storage: Nfts NextCollectionId (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) + fn force_create() -> Weight { + // Minimum execution time: 31_654 nanoseconds. + Weight::from_ref_time(32_078_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1001 w:1000) + // Storage: Nfts Attribute (r:1001 w:1000) + // Storage: Nfts ItemMetadataOf (r:0 w:1000) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionMetadataOf (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts ItemConfigOf (r:0 w:1000) + // Storage: Nfts Account (r:0 w:1000) + // Storage: Nfts CollectionAccount (r:0 w:1) + /// The range of component `n` is `[0, 1000]`. + /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + // Minimum execution time: 19_183_393 nanoseconds. 
+ Weight::from_ref_time(17_061_526_855) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(353_523).saturating_mul(n.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(1_861_080).saturating_mul(m.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(8_858_987).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(1003)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(3005)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + } + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + fn mint() -> Weight { + // Minimum execution time: 57_753 nanoseconds. + Weight::from_ref_time(58_313_000) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + fn force_mint() -> Weight { + // Minimum execution time: 56_429 nanoseconds. 
+ Weight::from_ref_time(57_202_000) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn burn() -> Weight { + // Minimum execution time: 59_681 nanoseconds. + Weight::from_ref_time(60_058_000) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(7)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn transfer() -> Weight { + // Minimum execution time: 66_085 nanoseconds. + Weight::from_ref_time(67_065_000) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts Item (r:102 w:102) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + // Minimum execution time: 25_949 nanoseconds. 
+ Weight::from_ref_time(26_106_000) + // Standard Error: 10_326 + .saturating_add(Weight::from_ref_time(11_496_776).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_transfer() -> Weight { + // Minimum execution time: 30_080 nanoseconds. + Weight::from_ref_time(30_825_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn unlock_item_transfer() -> Weight { + // Minimum execution time: 30_612 nanoseconds. + Weight::from_ref_time(31_422_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + fn lock_collection() -> Weight { + // Minimum execution time: 27_470 nanoseconds. + Weight::from_ref_time(28_015_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts OwnershipAcceptance (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) + fn transfer_ownership() -> Weight { + // Minimum execution time: 33_750 nanoseconds. + Weight::from_ref_time(34_139_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:4) + fn set_team() -> Weight { + // Minimum execution time: 36_565 nanoseconds. 
+ Weight::from_ref_time(37_464_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(5)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) + fn force_collection_owner() -> Weight { + // Minimum execution time: 29_028 nanoseconds. + Weight::from_ref_time(29_479_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + fn force_collection_config() -> Weight { + // Minimum execution time: 24_695 nanoseconds. + Weight::from_ref_time(25_304_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_properties() -> Weight { + // Minimum execution time: 28_910 nanoseconds. + Weight::from_ref_time(29_186_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) + fn set_attribute() -> Weight { + // Minimum execution time: 56_407 nanoseconds. + Weight::from_ref_time(58_176_000) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Attribute (r:1 w:1) + fn force_set_attribute() -> Weight { + // Minimum execution time: 36_402 nanoseconds. + Weight::from_ref_time(37_034_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: Nfts Attribute (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + fn clear_attribute() -> Weight { + // Minimum execution time: 52_022 nanoseconds. 
+ Weight::from_ref_time(54_059_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) + fn approve_item_attributes() -> Weight { + // Minimum execution time: 28_475 nanoseconds. + Weight::from_ref_time(29_162_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) + // Storage: Nfts Attribute (r:1 w:0) + // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[0, 1000]`. + fn cancel_item_attributes_approval(n: u32, ) -> Weight { + // Minimum execution time: 37_529 nanoseconds. + Weight::from_ref_time(38_023_000) + // Standard Error: 8_136 + .saturating_add(Weight::from_ref_time(7_452_872).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemMetadataOf (r:1 w:1) + fn set_metadata() -> Weight { + // Minimum execution time: 49_300 nanoseconds. + Weight::from_ref_time(49_790_000) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts ItemMetadataOf (r:1 w:1) + fn clear_metadata() -> Weight { + // Minimum execution time: 47_248 nanoseconds. 
+ Weight::from_ref_time(48_094_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) + fn set_collection_metadata() -> Weight { + // Minimum execution time: 44_137 nanoseconds. + Weight::from_ref_time(44_905_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) + fn clear_collection_metadata() -> Weight { + // Minimum execution time: 43_005 nanoseconds. + Weight::from_ref_time(43_898_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + fn approve_transfer() -> Weight { + // Minimum execution time: 36_344 nanoseconds. + Weight::from_ref_time(36_954_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + fn cancel_approval() -> Weight { + // Minimum execution time: 32_418 nanoseconds. + Weight::from_ref_time(33_029_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + fn clear_all_transfer_approvals() -> Weight { + // Minimum execution time: 31_448 nanoseconds. + Weight::from_ref_time(31_979_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts OwnershipAcceptance (r:1 w:1) + fn set_accept_ownership() -> Weight { + // Minimum execution time: 27_487 nanoseconds. 
+ Weight::from_ref_time(28_080_000) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts CollectionConfigOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) + fn set_collection_max_supply() -> Weight { + // Minimum execution time: 28_235 nanoseconds. + Weight::from_ref_time(28_967_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + fn update_mint_settings() -> Weight { + // Minimum execution time: 28_172 nanoseconds. + Weight::from_ref_time(28_636_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts ItemPriceOf (r:0 w:1) + fn set_price() -> Weight { + // Minimum execution time: 35_336 nanoseconds. + Weight::from_ref_time(36_026_000) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts ItemPriceOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn buy_item() -> Weight { + // Minimum execution time: 70_971 nanoseconds. + Weight::from_ref_time(72_036_000) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// The range of component `n` is `[0, 10]`. + fn pay_tips(n: u32, ) -> Weight { + // Minimum execution time: 5_151 nanoseconds. 
+ Weight::from_ref_time(11_822_888) + // Standard Error: 38_439 + .saturating_add(Weight::from_ref_time(3_511_844).saturating_mul(n.into())) + } + // Storage: Nfts Item (r:2 w:0) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn create_swap() -> Weight { + // Minimum execution time: 33_027 nanoseconds. + Weight::from_ref_time(33_628_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts PendingSwapOf (r:1 w:1) + // Storage: Nfts Item (r:1 w:0) + fn cancel_swap() -> Weight { + // Minimum execution time: 35_890 nanoseconds. + Weight::from_ref_time(36_508_000) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:2 w:2) + // Storage: Nfts PendingSwapOf (r:1 w:2) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:2 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Nfts Account (r:0 w:4) + // Storage: Nfts ItemPriceOf (r:0 w:2) + fn claim_swap() -> Weight { + // Minimum execution time: 101_076 nanoseconds. + Weight::from_ref_time(101_863_000) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(11)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Nfts NextCollectionId (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) + fn create() -> Weight { + // Minimum execution time: 44_312 nanoseconds. 
+ Weight::from_ref_time(44_871_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(5)) + } + // Storage: Nfts NextCollectionId (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts CollectionAccount (r:0 w:1) + fn force_create() -> Weight { + // Minimum execution time: 31_654 nanoseconds. + Weight::from_ref_time(32_078_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(5)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1001 w:1000) + // Storage: Nfts Attribute (r:1001 w:1000) + // Storage: Nfts ItemMetadataOf (r:0 w:1000) + // Storage: Nfts CollectionRoleOf (r:0 w:1) + // Storage: Nfts CollectionMetadataOf (r:0 w:1) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + // Storage: Nfts ItemConfigOf (r:0 w:1000) + // Storage: Nfts Account (r:0 w:1000) + // Storage: Nfts CollectionAccount (r:0 w:1) + /// The range of component `n` is `[0, 1000]`. + /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + // Minimum execution time: 19_183_393 nanoseconds. 
+ Weight::from_ref_time(17_061_526_855) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(353_523).saturating_mul(n.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(1_861_080).saturating_mul(m.into())) + // Standard Error: 16_689 + .saturating_add(Weight::from_ref_time(8_858_987).saturating_mul(a.into())) + .saturating_add(RocksDbWeight::get().reads(1003)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(RocksDbWeight::get().writes(3005)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(m.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) + } + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + fn mint() -> Weight { + // Minimum execution time: 57_753 nanoseconds. + Weight::from_ref_time(58_313_000) + .saturating_add(RocksDbWeight::get().reads(5)) + .saturating_add(RocksDbWeight::get().writes(4)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + fn force_mint() -> Weight { + // Minimum execution time: 56_429 nanoseconds. 
+ Weight::from_ref_time(57_202_000) + .saturating_add(RocksDbWeight::get().reads(5)) + .saturating_add(RocksDbWeight::get().writes(4)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + // Storage: Nfts Account (r:0 w:1) + // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn burn() -> Weight { + // Minimum execution time: 59_681 nanoseconds. + Weight::from_ref_time(60_058_000) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(7)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts ItemPriceOf (r:0 w:1) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn transfer() -> Weight { + // Minimum execution time: 66_085 nanoseconds. + Weight::from_ref_time(67_065_000) + .saturating_add(RocksDbWeight::get().reads(6)) + .saturating_add(RocksDbWeight::get().writes(6)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts Item (r:102 w:102) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + // Minimum execution time: 25_949 nanoseconds. 
+ Weight::from_ref_time(26_106_000) + // Standard Error: 10_326 + .saturating_add(Weight::from_ref_time(11_496_776).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_transfer() -> Weight { + // Minimum execution time: 30_080 nanoseconds. + Weight::from_ref_time(30_825_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn unlock_item_transfer() -> Weight { + // Minimum execution time: 30_612 nanoseconds. + Weight::from_ref_time(31_422_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts CollectionRoleOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + fn lock_collection() -> Weight { + // Minimum execution time: 27_470 nanoseconds. + Weight::from_ref_time(28_015_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts OwnershipAcceptance (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) + fn transfer_ownership() -> Weight { + // Minimum execution time: 33_750 nanoseconds. + Weight::from_ref_time(34_139_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(4)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:0 w:4) + fn set_team() -> Weight { + // Minimum execution time: 36_565 nanoseconds. 
+ Weight::from_ref_time(37_464_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(5)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionAccount (r:0 w:2) + fn force_collection_owner() -> Weight { + // Minimum execution time: 29_028 nanoseconds. + Weight::from_ref_time(29_479_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(3)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:0 w:1) + fn force_collection_config() -> Weight { + // Minimum execution time: 24_695 nanoseconds. + Weight::from_ref_time(25_304_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:1) + fn lock_item_properties() -> Weight { + // Minimum execution time: 28_910 nanoseconds. + Weight::from_ref_time(29_186_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts Attribute (r:1 w:1) + fn set_attribute() -> Weight { + // Minimum execution time: 56_407 nanoseconds. + Weight::from_ref_time(58_176_000) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts Attribute (r:1 w:1) + fn force_set_attribute() -> Weight { + // Minimum execution time: 36_402 nanoseconds. + Weight::from_ref_time(37_034_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: Nfts Attribute (r:1 w:1) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + fn clear_attribute() -> Weight { + // Minimum execution time: 52_022 nanoseconds. 
+ Weight::from_ref_time(54_059_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) + fn approve_item_attributes() -> Weight { + // Minimum execution time: 28_475 nanoseconds. + Weight::from_ref_time(29_162_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) + // Storage: Nfts Attribute (r:1 w:0) + // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[0, 1000]`. + fn cancel_item_attributes_approval(n: u32, ) -> Weight { + // Minimum execution time: 37_529 nanoseconds. + Weight::from_ref_time(38_023_000) + // Standard Error: 8_136 + .saturating_add(Weight::from_ref_time(7_452_872).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(2)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemMetadataOf (r:1 w:1) + fn set_metadata() -> Weight { + // Minimum execution time: 49_300 nanoseconds. + Weight::from_ref_time(49_790_000) + .saturating_add(RocksDbWeight::get().reads(4)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts ItemMetadataOf (r:1 w:1) + fn clear_metadata() -> Weight { + // Minimum execution time: 47_248 nanoseconds. 
+ Weight::from_ref_time(48_094_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts Collection (r:1 w:1) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) + fn set_collection_metadata() -> Weight { + // Minimum execution time: 44_137 nanoseconds. + Weight::from_ref_time(44_905_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(2)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts CollectionMetadataOf (r:1 w:1) + fn clear_collection_metadata() -> Weight { + // Minimum execution time: 43_005 nanoseconds. + Weight::from_ref_time(43_898_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + fn approve_transfer() -> Weight { + // Minimum execution time: 36_344 nanoseconds. + Weight::from_ref_time(36_954_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + fn cancel_approval() -> Weight { + // Minimum execution time: 32_418 nanoseconds. + Weight::from_ref_time(33_029_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts CollectionRoleOf (r:1 w:0) + fn clear_all_transfer_approvals() -> Weight { + // Minimum execution time: 31_448 nanoseconds. + Weight::from_ref_time(31_979_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts OwnershipAcceptance (r:1 w:1) + fn set_accept_ownership() -> Weight { + // Minimum execution time: 27_487 nanoseconds. 
+ Weight::from_ref_time(28_080_000) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts CollectionConfigOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) + fn set_collection_max_supply() -> Weight { + // Minimum execution time: 28_235 nanoseconds. + Weight::from_ref_time(28_967_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:1) + fn update_mint_settings() -> Weight { + // Minimum execution time: 28_172 nanoseconds. + Weight::from_ref_time(28_636_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: Nfts ItemPriceOf (r:0 w:1) + fn set_price() -> Weight { + // Minimum execution time: 35_336 nanoseconds. + Weight::from_ref_time(36_026_000) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:1 w:1) + // Storage: Nfts ItemPriceOf (r:1 w:1) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Nfts Account (r:0 w:2) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn buy_item() -> Weight { + // Minimum execution time: 70_971 nanoseconds. + Weight::from_ref_time(72_036_000) + .saturating_add(RocksDbWeight::get().reads(6)) + .saturating_add(RocksDbWeight::get().writes(6)) + } + /// The range of component `n` is `[0, 10]`. + fn pay_tips(n: u32, ) -> Weight { + // Minimum execution time: 5_151 nanoseconds. 
+ Weight::from_ref_time(11_822_888) + // Standard Error: 38_439 + .saturating_add(Weight::from_ref_time(3_511_844).saturating_mul(n.into())) + } + // Storage: Nfts Item (r:2 w:0) + // Storage: Nfts PendingSwapOf (r:0 w:1) + fn create_swap() -> Weight { + // Minimum execution time: 33_027 nanoseconds. + Weight::from_ref_time(33_628_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts PendingSwapOf (r:1 w:1) + // Storage: Nfts Item (r:1 w:0) + fn cancel_swap() -> Weight { + // Minimum execution time: 35_890 nanoseconds. + Weight::from_ref_time(36_508_000) + .saturating_add(RocksDbWeight::get().reads(2)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + // Storage: Nfts Item (r:2 w:2) + // Storage: Nfts PendingSwapOf (r:1 w:2) + // Storage: Nfts Collection (r:1 w:0) + // Storage: Nfts CollectionConfigOf (r:1 w:0) + // Storage: Nfts ItemConfigOf (r:2 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Nfts Account (r:0 w:4) + // Storage: Nfts ItemPriceOf (r:0 w:2) + fn claim_swap() -> Weight { + // Minimum execution time: 101_076 nanoseconds. 
+ Weight::from_ref_time(101_863_000) + .saturating_add(RocksDbWeight::get().reads(8)) + .saturating_add(RocksDbWeight::get().writes(11)) + } +} diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index 6959aa9783ee5..06a66838594c7 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -102,7 +102,7 @@ impl pallet_staking::Config for Runtime { type Reward = (); type SessionsPerEra = (); type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = ConstU32<3>; type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 568dec7b3a340..c67aec0134b07 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -116,7 +116,7 @@ impl pallet_staking::Config for Runtime { type Reward = (); type SessionsPerEra = (); type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index de3a4eca6308d..223f551a6e5c1 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -173,7 +173,7 @@ impl pallet_staking::Config for Test { type Reward = (); type SessionsPerEra = (); type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/root-offences/src/mock.rs 
b/frame/root-offences/src/mock.rs index 65bfcad4b26fc..e1d9ee14ee2fc 100644 --- a/frame/root-offences/src/mock.rs +++ b/frame/root-offences/src/mock.rs @@ -184,7 +184,7 @@ impl pallet_staking::Config for Test { type Reward = (); type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 2db7eb385111c..0699640bc092a 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -167,7 +167,7 @@ impl pallet_staking::Config for Test { type Reward = (); type SessionsPerEra = (); type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 81fa0f9d81dbf..b3d32b26ec1f7 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -922,6 +922,13 @@ benchmarks! 
{ ); } + set_min_commission { + let min_commission = Perbill::max_value(); + }: _(RawOrigin::Root, min_commission) + verify { + assert_eq!(MinCommission::::get(), Perbill::from_percent(100)); + } + impl_benchmark_test_suite!( Staking, crate::mock::ExtBuilder::default().has_stakers(true), diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index d3affda05277a..843e452125a6e 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -20,13 +20,14 @@ use crate::{self as pallet_staking, *}; use frame_election_provider_support::{onchain, SequentialPhragmen, VoteWeight}; use frame_support::{ - assert_ok, parameter_types, + assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Currency, FindAuthor, GenesisBuild, Get, Hooks, Imbalance, - OnUnbalanced, OneSessionHandler, + ConstU32, ConstU64, Currency, EitherOfDiverse, FindAuthor, GenesisBuild, Get, Hooks, + Imbalance, OnUnbalanced, OneSessionHandler, }, weights::constants::RocksDbWeight, }; +use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_io; use sp_runtime::{ @@ -292,7 +293,7 @@ impl crate::pallet::pallet::Config for Test { type Reward = MockReward; type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = frame_system::EnsureRoot; + type AdminOrigin = EnsureOneOrRoot; type BondingDuration = BondingDuration; type SessionInterface = Self; type EraPayout = ConvertCurve; @@ -797,6 +798,11 @@ pub(crate) fn staking_events() -> Vec> { parameter_types! { static StakingEventsIndex: usize = 0; } +ord_parameter_types! 
{ + pub const One: u64 = 1; +} + +type EnsureOneOrRoot = EitherOfDiverse, EnsureSignedBy>; pub(crate) fn staking_events_since_last_call() -> Vec> { let all: Vec<_> = System::events() diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index a7190d70c7061..db9aeba6fb58e 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1569,7 +1569,7 @@ impl StakingInterface for Pallet { } fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { - let num_slashing_spans = Self::slashing_spans(&who).iter().count() as u32; + let num_slashing_spans = Self::slashing_spans(&who).map_or(0, |s| s.iter().count() as u32); Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 1d5babe7ffa8f..8e8a8d9c7f600 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -183,8 +183,10 @@ pub mod pallet { #[pallet::constant] type SlashDeferDuration: Get; - /// The origin which can cancel a deferred slash. Root can always do this. - type SlashCancelOrigin: EnsureOrigin; + /// The origin which can manage less critical staking parameters that does not require root. + /// + /// Supported actions: (1) cancel deferred slash, (2) set minimum commission. + type AdminOrigin: EnsureOrigin; /// Interface for interacting with a session pallet. type SessionInterface: SessionInterface; @@ -982,7 +984,8 @@ pub mod pallet { // `BondingDuration` to proceed with the unbonding. let maybe_withdraw_weight = { if unlocking == T::MaxUnlockingChunks::get() as usize { - let real_num_slashing_spans = Self::slashing_spans(&controller).iter().count(); + let real_num_slashing_spans = + Self::slashing_spans(&controller).map_or(0, |s| s.iter().count()); Some(Self::do_withdraw_unbonded(&controller, real_num_slashing_spans as u32)?) 
} else { None @@ -1451,7 +1454,7 @@ pub mod pallet { /// Cancel enactment of a deferred slash. /// - /// Can be called by the `T::SlashCancelOrigin`. + /// Can be called by the `T::AdminOrigin`. /// /// Parameters: era and indices of the slashes for that era to kill. #[pallet::call_index(17)] @@ -1461,7 +1464,7 @@ pub mod pallet { era: EraIndex, slash_indices: Vec, ) -> DispatchResult { - T::SlashCancelOrigin::ensure_origin(origin)?; + T::AdminOrigin::ensure_origin(origin)?; ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); ensure!(is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); @@ -1682,7 +1685,6 @@ pub mod pallet { config_op_exp!(MinCommission, min_commission); Ok(()) } - /// Declare a `controller` to stop participating as either a validator or nominator. /// /// Effects will be felt at the beginning of the next era. @@ -1791,6 +1793,18 @@ pub mod pallet { })?; Ok(()) } + + /// Sets the minimum amount of commission that each validators must maintain. + /// + /// This call has lower privilege requirements than `set_staking_config` and can be called + /// by the `T::AdminOrigin`. Root can always call this. 
+ #[pallet::call_index(25)] + #[pallet::weight(T::WeightInfo::set_min_commission())] + pub fn set_min_commission(origin: OriginFor, new: Perbill) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + MinCommission::::put(new); + Ok(()) + } } } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index fc6fc68e66d5d..46c3c97441938 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -5725,3 +5725,94 @@ fn scale_validator_count_errors() { ); }) } + +#[test] +fn set_min_commission_works_with_admin_origin() { + ExtBuilder::default().build_and_execute(|| { + // no minimum commission set initially + assert_eq!(MinCommission::::get(), Zero::zero()); + + // root can set min commission + assert_ok!(Staking::set_min_commission(RuntimeOrigin::root(), Perbill::from_percent(10))); + + assert_eq!(MinCommission::::get(), Perbill::from_percent(10)); + + // Non privileged origin can not set min_commission + assert_noop!( + Staking::set_min_commission(RuntimeOrigin::signed(2), Perbill::from_percent(15)), + BadOrigin + ); + + // Admin Origin can set min commission + assert_ok!(Staking::set_min_commission( + RuntimeOrigin::signed(1), + Perbill::from_percent(15), + )); + + // setting commission below min_commission fails + assert_noop!( + Staking::validate( + RuntimeOrigin::signed(10), + ValidatorPrefs { commission: Perbill::from_percent(14), blocked: false } + ), + Error::::CommissionTooLow + ); + + // setting commission >= min_commission works + assert_ok!(Staking::validate( + RuntimeOrigin::signed(10), + ValidatorPrefs { commission: Perbill::from_percent(15), blocked: false } + )); + }) +} + +mod staking_interface { + use frame_support::storage::with_storage_layer; + use sp_staking::StakingInterface; + + use super::*; + + #[test] + fn force_unstake_with_slash_works() { + ExtBuilder::default().build_and_execute(|| { + // without slash + let _ = with_storage_layer::<(), _, _>(|| { + // bond an account, can unstake + 
assert_eq!(Staking::bonded(&11), Some(10)); + assert_ok!(::force_unstake(11)); + Err(DispatchError::from("revert")) + }); + + // bond again and add a slash, still can unstake. + assert_eq!(Staking::bonded(&11), Some(10)); + add_slash(&11); + assert_ok!(::force_unstake(11)); + }); + } + + #[test] + fn do_withdraw_unbonded_with_wrong_slash_spans_works_as_expected() { + ExtBuilder::default().build_and_execute(|| { + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], + &[Perbill::from_percent(100)], + ); + + assert_eq!(Staking::bonded(&11), Some(10)); + + assert_noop!( + Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0), + Error::::IncorrectSlashingSpans + ); + + let num_slashing_spans = Staking::slashing_spans(&11).map_or(0, |s| s.iter().count()); + assert_ok!(Staking::withdraw_unbonded( + RuntimeOrigin::signed(10), + num_slashing_spans as u32 + )); + }); + } +} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index aebb8eeb9b06e..9c283f5a065e3 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-12-14, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-12-25, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -77,6 +77,7 @@ pub trait WeightInfo { fn set_staking_configs_all_remove() -> Weight; fn chill_other() -> Weight; fn force_apply_min_commission() -> Weight; + fn set_min_commission() -> Weight; } /// Weights for pallet_staking using the Substrate node and recommended hardware. 
@@ -88,8 +89,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - // Minimum execution time: 54_402 nanoseconds. - Weight::from_ref_time(55_096_000) + // Minimum execution time: 54_884 nanoseconds. + Weight::from_ref_time(55_487_000) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -99,8 +100,8 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListNodes (r:3 w:3) // Storage: VoterList ListBags (r:2 w:2) fn bond_extra() -> Weight { - // Minimum execution time: 94_407 nanoseconds. - Weight::from_ref_time(95_209_000) + // Minimum execution time: 95_115 nanoseconds. + Weight::from_ref_time(96_213_000) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -114,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: VoterList ListBags (r:2 w:2) fn unbond() -> Weight { - // Minimum execution time: 101_046 nanoseconds. - Weight::from_ref_time(101_504_000) + // Minimum execution time: 102_031 nanoseconds. + Weight::from_ref_time(102_842_000) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(8)) } @@ -125,10 +126,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 45_452 nanoseconds. - Weight::from_ref_time(47_031_537) - // Standard Error: 491 - .saturating_add(Weight::from_ref_time(67_148).saturating_mul(s.into())) + // Minimum execution time: 46_569 nanoseconds. 
+ Weight::from_ref_time(48_034_493) + // Standard Error: 654 + .saturating_add(Weight::from_ref_time(63_628).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -148,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { - // Minimum execution time: 88_067 nanoseconds. - Weight::from_ref_time(93_309_587) - // Standard Error: 4_762 - .saturating_add(Weight::from_ref_time(1_114_938).saturating_mul(s.into())) + // Minimum execution time: 90_154 nanoseconds. + Weight::from_ref_time(95_725_631) + // Standard Error: 2_491 + .saturating_add(Weight::from_ref_time(1_110_795).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -168,8 +169,8 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - // Minimum execution time: 67_308 nanoseconds. - Weight::from_ref_time(68_266_000) + // Minimum execution time: 67_978 nanoseconds. + Weight::from_ref_time(69_153_000) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -177,10 +178,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Nominators (r:1 w:1) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - // Minimum execution time: 40_913 nanoseconds. - Weight::from_ref_time(48_140_584) - // Standard Error: 13_396 - .saturating_add(Weight::from_ref_time(6_862_893).saturating_mul(k.into())) + // Minimum execution time: 45_328 nanoseconds. 
+ Weight::from_ref_time(47_719_103) + // Standard Error: 14_458 + .saturating_add(Weight::from_ref_time(6_999_252).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -198,10 +199,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 73_490 nanoseconds. - Weight::from_ref_time(72_520_864) - // Standard Error: 7_090 - .saturating_add(Weight::from_ref_time(2_800_566).saturating_mul(n.into())) + // Minimum execution time: 74_650 nanoseconds. + Weight::from_ref_time(74_350_075) + // Standard Error: 10_527 + .saturating_add(Weight::from_ref_time(2_878_737).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -214,58 +215,58 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 66_293 nanoseconds. - Weight::from_ref_time(66_946_000) + // Minimum execution time: 67_790 nanoseconds. + Weight::from_ref_time(68_738_000) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - // Minimum execution time: 18_134 nanoseconds. - Weight::from_ref_time(18_497_000) + // Minimum execution time: 19_237 nanoseconds. 
+ Weight::from_ref_time(19_534_000) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - // Minimum execution time: 26_728 nanoseconds. - Weight::from_ref_time(27_154_000) + // Minimum execution time: 27_288 nanoseconds. + Weight::from_ref_time(27_667_000) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - // Minimum execution time: 4_877 nanoseconds. - Weight::from_ref_time(5_028_000) + // Minimum execution time: 5_155 nanoseconds. + Weight::from_ref_time(5_464_000) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - // Minimum execution time: 5_000 nanoseconds. - Weight::from_ref_time(5_290_000) + // Minimum execution time: 5_405 nanoseconds. + Weight::from_ref_time(5_670_000) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - // Minimum execution time: 5_093 nanoseconds. - Weight::from_ref_time(5_378_000) + // Minimum execution time: 5_459 nanoseconds. + Weight::from_ref_time(5_616_000) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - // Minimum execution time: 5_144 nanoseconds. - Weight::from_ref_time(5_454_000) + // Minimum execution time: 5_476 nanoseconds. + Weight::from_ref_time(5_692_000) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking Invulnerables (r:0 w:1) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - // Minimum execution time: 5_190 nanoseconds. 
- Weight::from_ref_time(5_960_962) - // Standard Error: 41 - .saturating_add(Weight::from_ref_time(10_329).saturating_mul(v.into())) + // Minimum execution time: 5_544 nanoseconds. + Weight::from_ref_time(6_513_190) + // Standard Error: 76 + .saturating_add(Weight::from_ref_time(9_975).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) @@ -283,10 +284,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - // Minimum execution time: 80_516 nanoseconds. - Weight::from_ref_time(86_317_884) - // Standard Error: 2_212 - .saturating_add(Weight::from_ref_time(1_103_962).saturating_mul(s.into())) + // Minimum execution time: 82_414 nanoseconds. + Weight::from_ref_time(88_511_246) + // Standard Error: 2_622 + .saturating_add(Weight::from_ref_time(1_131_814).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -294,10 +295,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking UnappliedSlashes (r:1 w:1) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - // Minimum execution time: 91_795 nanoseconds. - Weight::from_ref_time(904_524_900) - // Standard Error: 59_193 - .saturating_add(Weight::from_ref_time(4_944_680).saturating_mul(s.into())) + // Minimum execution time: 94_197 nanoseconds. + Weight::from_ref_time(903_418_326) + // Standard Error: 59_354 + .saturating_add(Weight::from_ref_time(4_948_354).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -312,10 +313,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `n` is `[0, 256]`. 
fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Minimum execution time: 127_774 nanoseconds. - Weight::from_ref_time(178_857_156) - // Standard Error: 15_229 - .saturating_add(Weight::from_ref_time(22_112_174).saturating_mul(n.into())) + // Minimum execution time: 133_065 nanoseconds. + Weight::from_ref_time(197_555_906) + // Standard Error: 19_561 + .saturating_add(Weight::from_ref_time(22_683_426).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2)) @@ -333,10 +334,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - // Minimum execution time: 161_910 nanoseconds. - Weight::from_ref_time(217_635_072) - // Standard Error: 30_726 - .saturating_add(Weight::from_ref_time(31_244_329).saturating_mul(n.into())) + // Minimum execution time: 164_719 nanoseconds. + Weight::from_ref_time(226_304_276) + // Standard Error: 31_675 + .saturating_add(Weight::from_ref_time(32_622_427).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3)) @@ -350,10 +351,10 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListBags (r:2 w:2) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { - // Minimum execution time: 92_986 nanoseconds. - Weight::from_ref_time(94_880_481) - // Standard Error: 2_007 - .saturating_add(Weight::from_ref_time(31_421).saturating_mul(l.into())) + // Minimum execution time: 95_631 nanoseconds. 
+ Weight::from_ref_time(96_861_556) + // Standard Error: 2_114 + .saturating_add(Weight::from_ref_time(37_543).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(8)) } @@ -372,10 +373,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking SpanSlash (r:0 w:1) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { - // Minimum execution time: 92_750 nanoseconds. - Weight::from_ref_time(95_115_568) - // Standard Error: 2_037 - .saturating_add(Weight::from_ref_time(1_086_488).saturating_mul(s.into())) + // Minimum execution time: 95_251 nanoseconds. + Weight::from_ref_time(97_818_954) + // Standard Error: 2_356 + .saturating_add(Weight::from_ref_time(1_104_695).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -396,19 +397,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasStakers (r:0 w:1) // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) + // Storage: Staking MinimumActiveStake (r:0 w:1) /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 506_543 nanoseconds. - Weight::from_ref_time(507_261_000) - // Standard Error: 1_766_631 - .saturating_add(Weight::from_ref_time(59_139_153).saturating_mul(v.into())) - // Standard Error: 176_035 - .saturating_add(Weight::from_ref_time(13_512_781).saturating_mul(n.into())) + // Minimum execution time: 512_923 nanoseconds. 
+ Weight::from_ref_time(514_740_000) + // Standard Error: 1_790_238 + .saturating_add(Weight::from_ref_time(59_320_539).saturating_mul(v.into())) + // Standard Error: 178_387 + .saturating_add(Weight::from_ref_time(13_902_705).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) } // Storage: VoterList CounterForListNodes (r:1 w:0) @@ -418,27 +420,29 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) + // Storage: Staking MinimumActiveStake (r:0 w:1) /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 24_155_382 nanoseconds. - Weight::from_ref_time(24_252_568_000) - // Standard Error: 319_250 - .saturating_add(Weight::from_ref_time(3_596_056).saturating_mul(v.into())) - // Standard Error: 319_250 - .saturating_add(Weight::from_ref_time(2_852_023).saturating_mul(n.into())) + // Minimum execution time: 24_913_316 nanoseconds. 
+ Weight::from_ref_time(25_053_596_000) + // Standard Error: 324_610 + .saturating_add(Weight::from_ref_time(3_454_859).saturating_mul(v.into())) + // Standard Error: 324_610 + .saturating_add(Weight::from_ref_time(3_020_267).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1)) } // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - // Minimum execution time: 4_741_111 nanoseconds. - Weight::from_ref_time(113_360_179) - // Standard Error: 25_375 - .saturating_add(Weight::from_ref_time(9_494_142).saturating_mul(v.into())) + // Minimum execution time: 4_916_401 nanoseconds. + Weight::from_ref_time(81_160_966) + // Standard Error: 23_829 + .saturating_add(Weight::from_ref_time(9_883_413).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) } @@ -449,8 +453,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - // Minimum execution time: 11_074 nanoseconds. - Weight::from_ref_time(11_312_000) + // Minimum execution time: 10_937 nanoseconds. + Weight::from_ref_time(11_324_000) .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking MinCommission (r:0 w:1) @@ -460,8 +464,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - // Minimum execution time: 9_795 nanoseconds. - Weight::from_ref_time(10_116_000) + // Minimum execution time: 9_424 nanoseconds. 
+ Weight::from_ref_time(10_021_000) .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) @@ -475,19 +479,25 @@ impl WeightInfo for SubstrateWeight { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - // Minimum execution time: 82_914 nanoseconds. - Weight::from_ref_time(83_848_000) + // Minimum execution time: 84_495 nanoseconds. + Weight::from_ref_time(85_559_000) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(6)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - // Minimum execution time: 20_317 nanoseconds. - Weight::from_ref_time(20_639_000) + // Minimum execution time: 20_385 nanoseconds. + Weight::from_ref_time(20_824_000) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + // Storage: Staking MinCommission (r:0 w:1) + fn set_min_commission() -> Weight { + // Minimum execution time: 6_995 nanoseconds. + Weight::from_ref_time(7_213_000) + .saturating_add(T::DbWeight::get().writes(1)) + } } // For backwards compatibility and tests @@ -498,8 +508,8 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - // Minimum execution time: 54_402 nanoseconds. - Weight::from_ref_time(55_096_000) + // Minimum execution time: 54_884 nanoseconds. + Weight::from_ref_time(55_487_000) .saturating_add(RocksDbWeight::get().reads(4)) .saturating_add(RocksDbWeight::get().writes(4)) } @@ -509,8 +519,8 @@ impl WeightInfo for () { // Storage: VoterList ListNodes (r:3 w:3) // Storage: VoterList ListBags (r:2 w:2) fn bond_extra() -> Weight { - // Minimum execution time: 94_407 nanoseconds. - Weight::from_ref_time(95_209_000) + // Minimum execution time: 95_115 nanoseconds. 
+ Weight::from_ref_time(96_213_000) .saturating_add(RocksDbWeight::get().reads(8)) .saturating_add(RocksDbWeight::get().writes(7)) } @@ -524,8 +534,8 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: VoterList ListBags (r:2 w:2) fn unbond() -> Weight { - // Minimum execution time: 101_046 nanoseconds. - Weight::from_ref_time(101_504_000) + // Minimum execution time: 102_031 nanoseconds. + Weight::from_ref_time(102_842_000) .saturating_add(RocksDbWeight::get().reads(12)) .saturating_add(RocksDbWeight::get().writes(8)) } @@ -535,10 +545,10 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - // Minimum execution time: 45_452 nanoseconds. - Weight::from_ref_time(47_031_537) - // Standard Error: 491 - .saturating_add(Weight::from_ref_time(67_148).saturating_mul(s.into())) + // Minimum execution time: 46_569 nanoseconds. + Weight::from_ref_time(48_034_493) + // Standard Error: 654 + .saturating_add(Weight::from_ref_time(63_628).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4)) .saturating_add(RocksDbWeight::get().writes(3)) } @@ -558,10 +568,10 @@ impl WeightInfo for () { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { - // Minimum execution time: 88_067 nanoseconds. - Weight::from_ref_time(93_309_587) - // Standard Error: 4_762 - .saturating_add(Weight::from_ref_time(1_114_938).saturating_mul(s.into())) + // Minimum execution time: 90_154 nanoseconds. 
+ Weight::from_ref_time(95_725_631) + // Standard Error: 2_491 + .saturating_add(Weight::from_ref_time(1_110_795).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13)) .saturating_add(RocksDbWeight::get().writes(12)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -578,8 +588,8 @@ impl WeightInfo for () { // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - // Minimum execution time: 67_308 nanoseconds. - Weight::from_ref_time(68_266_000) + // Minimum execution time: 67_978 nanoseconds. + Weight::from_ref_time(69_153_000) .saturating_add(RocksDbWeight::get().reads(11)) .saturating_add(RocksDbWeight::get().writes(5)) } @@ -587,10 +597,10 @@ impl WeightInfo for () { // Storage: Staking Nominators (r:1 w:1) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - // Minimum execution time: 40_913 nanoseconds. - Weight::from_ref_time(48_140_584) - // Standard Error: 13_396 - .saturating_add(Weight::from_ref_time(6_862_893).saturating_mul(k.into())) + // Minimum execution time: 45_328 nanoseconds. + Weight::from_ref_time(47_719_103) + // Standard Error: 14_458 + .saturating_add(Weight::from_ref_time(6_999_252).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -608,10 +618,10 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - // Minimum execution time: 73_490 nanoseconds. - Weight::from_ref_time(72_520_864) - // Standard Error: 7_090 - .saturating_add(Weight::from_ref_time(2_800_566).saturating_mul(n.into())) + // Minimum execution time: 74_650 nanoseconds. 
+ Weight::from_ref_time(74_350_075) + // Standard Error: 10_527 + .saturating_add(Weight::from_ref_time(2_878_737).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6)) @@ -624,58 +634,58 @@ impl WeightInfo for () { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - // Minimum execution time: 66_293 nanoseconds. - Weight::from_ref_time(66_946_000) + // Minimum execution time: 67_790 nanoseconds. + Weight::from_ref_time(68_738_000) .saturating_add(RocksDbWeight::get().reads(8)) .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - // Minimum execution time: 18_134 nanoseconds. - Weight::from_ref_time(18_497_000) + // Minimum execution time: 19_237 nanoseconds. + Weight::from_ref_time(19_534_000) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - // Minimum execution time: 26_728 nanoseconds. - Weight::from_ref_time(27_154_000) + // Minimum execution time: 27_288 nanoseconds. + Weight::from_ref_time(27_667_000) .saturating_add(RocksDbWeight::get().reads(3)) .saturating_add(RocksDbWeight::get().writes(3)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - // Minimum execution time: 4_877 nanoseconds. - Weight::from_ref_time(5_028_000) + // Minimum execution time: 5_155 nanoseconds. + Weight::from_ref_time(5_464_000) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - // Minimum execution time: 5_000 nanoseconds. - Weight::from_ref_time(5_290_000) + // Minimum execution time: 5_405 nanoseconds. 
+ Weight::from_ref_time(5_670_000) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - // Minimum execution time: 5_093 nanoseconds. - Weight::from_ref_time(5_378_000) + // Minimum execution time: 5_459 nanoseconds. + Weight::from_ref_time(5_616_000) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - // Minimum execution time: 5_144 nanoseconds. - Weight::from_ref_time(5_454_000) + // Minimum execution time: 5_476 nanoseconds. + Weight::from_ref_time(5_692_000) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking Invulnerables (r:0 w:1) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - // Minimum execution time: 5_190 nanoseconds. - Weight::from_ref_time(5_960_962) - // Standard Error: 41 - .saturating_add(Weight::from_ref_time(10_329).saturating_mul(v.into())) + // Minimum execution time: 5_544 nanoseconds. + Weight::from_ref_time(6_513_190) + // Standard Error: 76 + .saturating_add(Weight::from_ref_time(9_975).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking Bonded (r:1 w:1) @@ -693,10 +703,10 @@ impl WeightInfo for () { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - // Minimum execution time: 80_516 nanoseconds. - Weight::from_ref_time(86_317_884) - // Standard Error: 2_212 - .saturating_add(Weight::from_ref_time(1_103_962).saturating_mul(s.into())) + // Minimum execution time: 82_414 nanoseconds. 
+ Weight::from_ref_time(88_511_246) + // Standard Error: 2_622 + .saturating_add(Weight::from_ref_time(1_131_814).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(11)) .saturating_add(RocksDbWeight::get().writes(12)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -704,10 +714,10 @@ impl WeightInfo for () { // Storage: Staking UnappliedSlashes (r:1 w:1) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - // Minimum execution time: 91_795 nanoseconds. - Weight::from_ref_time(904_524_900) - // Standard Error: 59_193 - .saturating_add(Weight::from_ref_time(4_944_680).saturating_mul(s.into())) + // Minimum execution time: 94_197 nanoseconds. + Weight::from_ref_time(903_418_326) + // Standard Error: 59_354 + .saturating_add(Weight::from_ref_time(4_948_354).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1)) .saturating_add(RocksDbWeight::get().writes(1)) } @@ -722,10 +732,10 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Minimum execution time: 127_774 nanoseconds. - Weight::from_ref_time(178_857_156) - // Standard Error: 15_229 - .saturating_add(Weight::from_ref_time(22_112_174).saturating_mul(n.into())) + // Minimum execution time: 133_065 nanoseconds. + Weight::from_ref_time(197_555_906) + // Standard Error: 19_561 + .saturating_add(Weight::from_ref_time(22_683_426).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(9)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2)) @@ -743,10 +753,10 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - // Minimum execution time: 161_910 nanoseconds. 
- Weight::from_ref_time(217_635_072) - // Standard Error: 30_726 - .saturating_add(Weight::from_ref_time(31_244_329).saturating_mul(n.into())) + // Minimum execution time: 164_719 nanoseconds. + Weight::from_ref_time(226_304_276) + // Standard Error: 31_675 + .saturating_add(Weight::from_ref_time(32_622_427).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(10)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3)) @@ -760,10 +770,10 @@ impl WeightInfo for () { // Storage: VoterList ListBags (r:2 w:2) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { - // Minimum execution time: 92_986 nanoseconds. - Weight::from_ref_time(94_880_481) - // Standard Error: 2_007 - .saturating_add(Weight::from_ref_time(31_421).saturating_mul(l.into())) + // Minimum execution time: 95_631 nanoseconds. + Weight::from_ref_time(96_861_556) + // Standard Error: 2_114 + .saturating_add(Weight::from_ref_time(37_543).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9)) .saturating_add(RocksDbWeight::get().writes(8)) } @@ -782,10 +792,10 @@ impl WeightInfo for () { // Storage: Staking SpanSlash (r:0 w:1) /// The range of component `s` is `[1, 100]`. fn reap_stash(s: u32, ) -> Weight { - // Minimum execution time: 92_750 nanoseconds. - Weight::from_ref_time(95_115_568) - // Standard Error: 2_037 - .saturating_add(Weight::from_ref_time(1_086_488).saturating_mul(s.into())) + // Minimum execution time: 95_251 nanoseconds. 
+ Weight::from_ref_time(97_818_954) + // Standard Error: 2_356 + .saturating_add(Weight::from_ref_time(1_104_695).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12)) .saturating_add(RocksDbWeight::get().writes(12)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -806,19 +816,20 @@ impl WeightInfo for () { // Storage: Staking ErasStakers (r:0 w:1) // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) + // Storage: Staking MinimumActiveStake (r:0 w:1) /// The range of component `v` is `[1, 10]`. /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 506_543 nanoseconds. - Weight::from_ref_time(507_261_000) - // Standard Error: 1_766_631 - .saturating_add(Weight::from_ref_time(59_139_153).saturating_mul(v.into())) - // Standard Error: 176_035 - .saturating_add(Weight::from_ref_time(13_512_781).saturating_mul(n.into())) + // Minimum execution time: 512_923 nanoseconds. + Weight::from_ref_time(514_740_000) + // Standard Error: 1_790_238 + .saturating_add(Weight::from_ref_time(59_320_539).saturating_mul(v.into())) + // Standard Error: 178_387 + .saturating_add(Weight::from_ref_time(13_902_705).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(3)) + .saturating_add(RocksDbWeight::get().writes(4)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) } // Storage: VoterList CounterForListNodes (r:1 w:0) @@ -828,27 +839,29 @@ impl WeightInfo for () { // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) + // Storage: Staking MinimumActiveStake (r:0 w:1) /// The range of component `v` is `[500, 1000]`. 
/// The range of component `n` is `[500, 1000]`. fn get_npos_voters(v: u32, n: u32, ) -> Weight { - // Minimum execution time: 24_155_382 nanoseconds. - Weight::from_ref_time(24_252_568_000) - // Standard Error: 319_250 - .saturating_add(Weight::from_ref_time(3_596_056).saturating_mul(v.into())) - // Standard Error: 319_250 - .saturating_add(Weight::from_ref_time(2_852_023).saturating_mul(n.into())) + // Minimum execution time: 24_913_316 nanoseconds. + Weight::from_ref_time(25_053_596_000) + // Standard Error: 324_610 + .saturating_add(Weight::from_ref_time(3_454_859).saturating_mul(v.into())) + // Standard Error: 324_610 + .saturating_add(Weight::from_ref_time(3_020_267).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(1)) } // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - // Minimum execution time: 4_741_111 nanoseconds. - Weight::from_ref_time(113_360_179) - // Standard Error: 25_375 - .saturating_add(Weight::from_ref_time(9_494_142).saturating_mul(v.into())) + // Minimum execution time: 4_916_401 nanoseconds. + Weight::from_ref_time(81_160_966) + // Standard Error: 23_829 + .saturating_add(Weight::from_ref_time(9_883_413).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) } @@ -859,8 +872,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - // Minimum execution time: 11_074 nanoseconds. - Weight::from_ref_time(11_312_000) + // Minimum execution time: 10_937 nanoseconds. 
+ Weight::from_ref_time(11_324_000) .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking MinCommission (r:0 w:1) @@ -870,8 +883,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - // Minimum execution time: 9_795 nanoseconds. - Weight::from_ref_time(10_116_000) + // Minimum execution time: 9_424 nanoseconds. + Weight::from_ref_time(10_021_000) .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking Ledger (r:1 w:0) @@ -885,17 +898,23 @@ impl WeightInfo for () { // Storage: VoterList ListBags (r:1 w:1) // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - // Minimum execution time: 82_914 nanoseconds. - Weight::from_ref_time(83_848_000) + // Minimum execution time: 84_495 nanoseconds. + Weight::from_ref_time(85_559_000) .saturating_add(RocksDbWeight::get().reads(11)) .saturating_add(RocksDbWeight::get().writes(6)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - // Minimum execution time: 20_317 nanoseconds. - Weight::from_ref_time(20_639_000) + // Minimum execution time: 20_385 nanoseconds. + Weight::from_ref_time(20_824_000) .saturating_add(RocksDbWeight::get().reads(2)) .saturating_add(RocksDbWeight::get().writes(1)) } + // Storage: Staking MinCommission (r:0 w:1) + fn set_min_commission() -> Weight { + // Minimum execution time: 6_995 nanoseconds. 
+ Weight::from_ref_time(7_213_000) + .saturating_add(RocksDbWeight::get().writes(1)) + } } diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 9620038edfd6d..3af3225484fae 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -93,6 +93,10 @@ impl syn::parse::Parse for FunctionAttr { let call_index_content; syn::parenthesized!(call_index_content in content); let index = call_index_content.parse::()?; + if !index.suffix().is_empty() { + let msg = "Number literal must not have a suffix"; + return Err(syn::Error::new(index.span(), msg)) + } Ok(FunctionAttr::CallIndex(index.base10_parse()?)) } else { Err(lookahead.error()) diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs index 77eb83adfbfb0..03a24bd3ba9c8 100644 --- a/frame/support/src/traits/tokens.rs +++ b/frame/support/src/traits/tokens.rs @@ -23,9 +23,11 @@ pub mod fungibles; pub mod imbalance; mod misc; pub mod nonfungible; +pub mod nonfungible_v2; pub mod nonfungibles; +pub mod nonfungibles_v2; pub use imbalance::Imbalance; pub use misc::{ - AssetId, Balance, BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, - Locker, WithdrawConsequence, WithdrawReasons, + AssetId, AttributeNamespace, Balance, BalanceConversion, BalanceStatus, DepositConsequence, + ExistenceRequirement, Locker, WithdrawConsequence, WithdrawReasons, }; diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index 294d0e89c8b9e..f9876ef477b81 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -126,6 +126,21 @@ pub enum BalanceStatus { Reserved, } +/// Attribute namespaces for non-fungible tokens. 
+#[derive( + Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen, +)] +pub enum AttributeNamespace { + /// An attribute was set by the pallet. + Pallet, + /// An attribute was set by collection's owner. + CollectionOwner, + /// An attribute was set by item's owner. + ItemOwner, + /// An attribute was set by pre-approved account. + Account(AccountId), +} + bitflags::bitflags! { /// Reasons for moving funds out of an account. #[derive(Encode, Decode, MaxEncodedLen)] diff --git a/frame/support/src/traits/tokens/nonfungible_v2.rs b/frame/support/src/traits/tokens/nonfungible_v2.rs new file mode 100644 index 0000000000000..a1b75e62e4db5 --- /dev/null +++ b/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -0,0 +1,248 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with a single non-fungible item. +//! +//! This assumes a single-level namespace identified by `Inspect::ItemId`, and could +//! reasonably be implemented by pallets that want to expose a single collection of NFT-like +//! objects. +//! +//! For an NFT API that has dual-level namespacing, the traits in `nonfungibles` are better to +//! use. 
+ +use super::nonfungibles_v2 as nonfungibles; +use crate::{ + dispatch::DispatchResult, + traits::{tokens::misc::AttributeNamespace, Get}, +}; +use codec::{Decode, Encode}; +use sp_runtime::TokenError; +use sp_std::prelude::*; + +/// Trait for providing an interface to a read-only NFT-like item. +pub trait Inspect { + /// Type for identifying an item. + type ItemId; + + /// Returns the owner of `item`, or `None` if the item doesn't exist or has no + /// owner. + fn owner(item: &Self::ItemId) -> Option; + + /// Returns the attribute value of `item` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn attribute( + _item: &Self::ItemId, + _namespace: &AttributeNamespace, + _key: &[u8], + ) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `item` corresponding to `key`. + /// + /// By default this just attempts to use `attribute`. + fn typed_attribute( + item: &Self::ItemId, + namespace: &AttributeNamespace, + key: &K, + ) -> Option { + key.using_encoded(|d| Self::attribute(item, namespace, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns `true` if the `item` may be transferred. + /// + /// Default implementation is that all items are transferable. + fn can_transfer(_item: &Self::ItemId) -> bool { + true + } +} + +/// Interface for enumerating items in existence or owned by a given account over a collection +/// of NFTs. +pub trait InspectEnumerable: Inspect { + /// The iterator type for [`Self::items`]. + type ItemsIterator: Iterator; + /// The iterator type for [`Self::owned`]. + type OwnedIterator: Iterator; + + /// Returns an iterator of the items within a `collection` in existence. + fn items() -> Self::ItemsIterator; + + /// Returns an iterator of the items of all collections owned by `who`. + fn owned(who: &AccountId) -> Self::OwnedIterator; +} + +/// Trait for providing an interface for NFT-like items which may be minted, burned and/or have +/// attributes set on them. 
+pub trait Mutate: Inspect { + /// Mint some `item` to be owned by `who`. + /// + /// By default, this is not a supported operation. + fn mint_into( + _item: &Self::ItemId, + _who: &AccountId, + _config: &ItemConfig, + _deposit_collection_owner: bool, + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Burn some `item`. + /// + /// By default, this is not a supported operation. + fn burn(_item: &Self::ItemId, _maybe_check_owner: Option<&AccountId>) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Set attribute `value` of `item`'s `key`. + /// + /// By default, this is not a supported operation. + fn set_attribute(_item: &Self::ItemId, _key: &[u8], _value: &[u8]) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Attempt to set the strongly-typed attribute `value` of `item`'s `key`. + /// + /// By default this just attempts to use `set_attribute`. + fn set_typed_attribute( + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(item, k, v))) + } +} + +/// Trait for transferring a non-fungible item. +pub trait Transfer: Inspect { + /// Transfer `item` into `destination` account. + fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult; +} + +/// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by +/// identifying a single item. 
+pub struct ItemOf< + F: nonfungibles::Inspect, + A: Get<>::CollectionId>, + AccountId, +>(sp_std::marker::PhantomData<(F, A, AccountId)>); + +impl< + F: nonfungibles::Inspect, + A: Get<>::CollectionId>, + AccountId, + > Inspect for ItemOf +{ + type ItemId = >::ItemId; + fn owner(item: &Self::ItemId) -> Option { + >::owner(&A::get(), item) + } + fn attribute( + item: &Self::ItemId, + namespace: &AttributeNamespace, + key: &[u8], + ) -> Option> { + >::attribute(&A::get(), item, namespace, key) + } + fn typed_attribute( + item: &Self::ItemId, + namespace: &AttributeNamespace, + key: &K, + ) -> Option { + >::typed_attribute(&A::get(), item, namespace, key) + } + fn can_transfer(item: &Self::ItemId) -> bool { + >::can_transfer(&A::get(), item) + } +} + +impl< + F: nonfungibles::InspectEnumerable, + A: Get<>::CollectionId>, + AccountId, + > InspectEnumerable for ItemOf +{ + type ItemsIterator = >::ItemsIterator; + type OwnedIterator = + >::OwnedInCollectionIterator; + + fn items() -> Self::ItemsIterator { + >::items(&A::get()) + } + fn owned(who: &AccountId) -> Self::OwnedIterator { + >::owned_in_collection(&A::get(), who) + } +} + +impl< + F: nonfungibles::Mutate, + A: Get<>::CollectionId>, + AccountId, + ItemConfig, + > Mutate for ItemOf +{ + fn mint_into( + item: &Self::ItemId, + who: &AccountId, + config: &ItemConfig, + deposit_collection_owner: bool, + ) -> DispatchResult { + >::mint_into( + &A::get(), + item, + who, + config, + deposit_collection_owner, + ) + } + fn burn(item: &Self::ItemId, maybe_check_owner: Option<&AccountId>) -> DispatchResult { + >::burn(&A::get(), item, maybe_check_owner) + } + fn set_attribute(item: &Self::ItemId, key: &[u8], value: &[u8]) -> DispatchResult { + >::set_attribute( + &A::get(), + item, + key, + value, + ) + } + fn set_typed_attribute( + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + >::set_typed_attribute( + &A::get(), + item, + key, + value, + ) + } +} + +impl< + F: nonfungibles::Transfer, + A: 
Get<>::CollectionId>, + AccountId, + > Transfer for ItemOf +{ + fn transfer(item: &Self::ItemId, destination: &AccountId) -> DispatchResult { + >::transfer(&A::get(), item, destination) + } +} diff --git a/frame/support/src/traits/tokens/nonfungibles_v2.rs b/frame/support/src/traits/tokens/nonfungibles_v2.rs new file mode 100644 index 0000000000000..d2f5f5529fa96 --- /dev/null +++ b/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -0,0 +1,257 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with multiple collections of non-fungible items. +//! +//! This assumes a dual-level namespace identified by `Inspect::ItemId`, and could +//! reasonably be implemented by pallets which want to expose multiple independent collections of +//! NFT-like objects. +//! +//! For an NFT API which has single-level namespacing, the traits in `nonfungible` are better to +//! use. +//! +//! Implementations of these traits may be converted to implementations of corresponding +//! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. + +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::tokens::misc::AttributeNamespace, +}; +use codec::{Decode, Encode}; +use sp_runtime::TokenError; +use sp_std::prelude::*; + +/// Trait for providing an interface to many read-only NFT-like sets of items. 
+pub trait Inspect { + /// Type for identifying an item. + type ItemId; + + /// Type for identifying a collection (an identifier for an independent collection of + /// items). + type CollectionId; + + /// Returns the owner of `item` of `collection`, or `None` if the item doesn't exist + /// (or somehow has no owner). + fn owner(collection: &Self::CollectionId, item: &Self::ItemId) -> Option; + + /// Returns the owner of the `collection`, if there is one. For many NFTs this may not + /// make any sense, so users of this API should not be surprised to find a collection + /// results in `None` here. + fn collection_owner(_collection: &Self::CollectionId) -> Option { + None + } + + /// Returns the attribute value of `item` of `collection` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn attribute( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _namespace: &AttributeNamespace, + _key: &[u8], + ) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `item` of `collection` corresponding to + /// `key`. + /// + /// By default this just attempts to use `attribute`. + fn typed_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + namespace: &AttributeNamespace, + key: &K, + ) -> Option { + key.using_encoded(|d| Self::attribute(collection, item, namespace, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns the attribute value of `collection` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn collection_attribute(_collection: &Self::CollectionId, _key: &[u8]) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `collection` corresponding to `key`. + /// + /// By default this just attempts to use `collection_attribute`. 
+ fn typed_collection_attribute( + collection: &Self::CollectionId, + key: &K, + ) -> Option { + key.using_encoded(|d| Self::collection_attribute(collection, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns `true` if the `item` of `collection` may be transferred. + /// + /// Default implementation is that all items are transferable. + fn can_transfer(_collection: &Self::CollectionId, _item: &Self::ItemId) -> bool { + true + } +} + +/// Interface for enumerating items in existence or owned by a given account over many collections +/// of NFTs. +pub trait InspectEnumerable: Inspect { + /// The iterator type for [`Self::collections`]. + type CollectionsIterator: Iterator; + /// The iterator type for [`Self::items`]. + type ItemsIterator: Iterator; + /// The iterator type for [`Self::owned`]. + type OwnedIterator: Iterator; + /// The iterator type for [`Self::owned_in_collection`]. + type OwnedInCollectionIterator: Iterator; + + /// Returns an iterator of the collections in existence. + fn collections() -> Self::CollectionsIterator; + + /// Returns an iterator of the items of a `collection` in existence. + fn items(collection: &Self::CollectionId) -> Self::ItemsIterator; + + /// Returns an iterator of the items of all collections owned by `who`. + fn owned(who: &AccountId) -> Self::OwnedIterator; + + /// Returns an iterator of the items of `collection` owned by `who`. + fn owned_in_collection( + collection: &Self::CollectionId, + who: &AccountId, + ) -> Self::OwnedInCollectionIterator; +} + +/// Trait for providing the ability to create collections of nonfungible items. +pub trait Create: Inspect { + /// Create a `collection` of nonfungible items to be owned by `who` and managed by `admin`. + fn create_collection( + who: &AccountId, + admin: &AccountId, + config: &CollectionConfig, + ) -> Result; +} + +/// Trait for providing the ability to destroy collections of nonfungible items. 
+pub trait Destroy: Inspect { + /// The witness data needed to destroy a collection. + type DestroyWitness; + + /// Provide the appropriate witness data needed to destroy a collection. + fn get_destroy_witness(collection: &Self::CollectionId) -> Option; + + /// Destroy an existing collection of nonfungible items. + /// * `collection`: The `CollectionId` to be destroyed. + /// * `witness`: Any witness data that needs to be provided to complete the operation + /// successfully. + /// * `maybe_check_owner`: An optional `AccountId` that can be used to authorize the destroy + /// command. If not provided, we will not do any authorization checks before destroying the + /// collection. + /// + /// If successful, this function will return the actual witness data from the destroyed + /// collection. This may be different than the witness data provided, and can be used to refund + /// weight. + fn destroy( + collection: Self::CollectionId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result; +} + +/// Trait for providing an interface for multiple collections of NFT-like items which may be +/// minted, burned and/or have attributes set on them. +pub trait Mutate: Inspect { + /// Mint some `item` of `collection` to be owned by `who`. + /// + /// By default, this is not a supported operation. + fn mint_into( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _who: &AccountId, + _config: &ItemConfig, + _deposit_collection_owner: bool, + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Burn some `item` of `collection`. + /// + /// By default, this is not a supported operation. + fn burn( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _maybe_check_owner: Option<&AccountId>, + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Set attribute `value` of `item` of `collection`'s `key`. + /// + /// By default, this is not a supported operation.
+ fn set_attribute( + _collection: &Self::CollectionId, + _item: &Self::ItemId, + _key: &[u8], + _value: &[u8], + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Attempt to set the strongly-typed attribute `value` of `item` of `collection`'s `key`. + /// + /// By default this just attempts to use `set_attribute`. + fn set_typed_attribute( + collection: &Self::CollectionId, + item: &Self::ItemId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(collection, item, k, v))) + } + + /// Set attribute `value` of `collection`'s `key`. + /// + /// By default, this is not a supported operation. + fn set_collection_attribute( + _collection: &Self::CollectionId, + _key: &[u8], + _value: &[u8], + ) -> DispatchResult { + Err(TokenError::Unsupported.into()) + } + + /// Attempt to set the strongly-typed attribute `value` of `collection`'s `key`. + /// + /// By default this just attempts to use `set_attribute`. + fn set_typed_collection_attribute( + collection: &Self::CollectionId, + key: &K, + value: &V, + ) -> DispatchResult { + key.using_encoded(|k| { + value.using_encoded(|v| Self::set_collection_attribute(collection, k, v)) + }) + } +} + +/// Trait for transferring non-fungible sets of items. +pub trait Transfer: Inspect { + /// Transfer `item` of `collection` into `destination` account. 
+ fn transfer( + collection: &Self::CollectionId, + item: &Self::ItemId, + destination: &AccountId, + ) -> DispatchResult; +} diff --git a/frame/support/test/tests/pallet_ui/call_index_has_suffix.rs b/frame/support/test/tests/pallet_ui/call_index_has_suffix.rs new file mode 100644 index 0000000000000..abe4dc199bf51 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_index_has_suffix.rs @@ -0,0 +1,20 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::DispatchResultWithPostInfo; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0something)] + pub fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_index_has_suffix.stderr b/frame/support/test/tests/pallet_ui/call_index_has_suffix.stderr new file mode 100644 index 0000000000000..2f4cead6cf70c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_index_has_suffix.stderr @@ -0,0 +1,5 @@ +error: Number literal must not have a suffix + --> tests/pallet_ui/call_index_has_suffix.rs:14:30 + | +14 | #[pallet::call_index(0something)] + | ^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 170555665d877..be31b39c11725 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -26,5 +26,5 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) 
(TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 79 others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index a3af9897be5c7..364eb5e6d5bb1 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 162 others + and 163 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git 
a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 9e87f87825b2a..371e90323d9cb 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 162 others + and 163 others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 281 others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index cce9fa70b3da5..b5443c6f327e4 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr 
@@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 79 others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 877485dda2084..afc7aaa8768cf 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 78 others + and 79 others = note: required for `Key` to implement `KeyGeneratorMaxEncodedLen` = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/weight_argument_has_suffix.rs b/frame/support/test/tests/pallet_ui/weight_argument_has_suffix.rs new file mode 100644 index 0000000000000..99195d21be62e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/weight_argument_has_suffix.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::DispatchResultWithPostInfo; + use 
frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(10_000something)] + pub fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { Ok(().into()) } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/weight_argument_has_suffix.stderr b/frame/support/test/tests/pallet_ui/weight_argument_has_suffix.stderr new file mode 100644 index 0000000000000..c9b2010f88bb3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/weight_argument_has_suffix.stderr @@ -0,0 +1,41 @@ +error: invalid suffix `something` for number literal + --> tests/pallet_ui/weight_argument_has_suffix.rs:15:26 + | +15 | #[pallet::weight(10_000something)] + | ^^^^^^^^^^^^^^^ invalid suffix `something` + | + = help: the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.) + +error[E0308]: mismatched types + --> tests/pallet_ui/weight_argument_has_suffix.rs:12:12 + | +12 | #[pallet::call] + | ^^^^ + | | + | expected trait `frame_support::dispatch::ClassifyDispatch`, found trait `frame_support::dispatch::WeighData` + | arguments to this function are incorrect + | + = note: expected reference `&dyn frame_support::dispatch::ClassifyDispatch<()>` + found reference `&dyn frame_support::dispatch::WeighData<()>` +note: associated function defined here + --> $WORKSPACE/frame/support/src/dispatch.rs + | + | fn classify_dispatch(&self, target: T) -> DispatchClass; + | ^^^^^^^^^^^^^^^^^ + +error[E0308]: mismatched types + --> tests/pallet_ui/weight_argument_has_suffix.rs:12:12 + | +12 | #[pallet::call] + | ^^^^ + | | + | expected trait `frame_support::dispatch::PaysFee`, found trait `frame_support::dispatch::WeighData` + | arguments to this function are incorrect + | + = note: expected reference `&dyn frame_support::dispatch::PaysFee<()>` + found reference `&dyn 
frame_support::dispatch::WeighData<()>` +note: associated function defined here + --> $WORKSPACE/frame/support/src/dispatch.rs + | + | fn pays_fee(&self, _target: T) -> Pays; + | ^^^^^^^^ diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 0ffc53d8b7978..1bde1238191a4 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -225,7 +225,8 @@ pub mod pallet { /// The amount which has been reported as inactive to Currency. #[pallet::storage] - pub type Inactive, I: 'static = ()> = StorageValue<_, BalanceOf, ValueQuery>; + pub type Deactivated, I: 'static = ()> = + StorageValue<_, BalanceOf, ValueQuery>; /// Proposal indices that have been approved but not yet awarded. #[pallet::storage] @@ -292,6 +293,8 @@ pub mod pallet { amount: BalanceOf, beneficiary: T::AccountId, }, + /// The inactive funds of the pallet have been updated. + UpdatedInactive { reactivated: BalanceOf, deactivated: BalanceOf }, } /// Error for the treasury pallet. @@ -321,13 +324,15 @@ pub mod pallet { /// # fn on_initialize(n: T::BlockNumber) -> Weight { let pot = Self::pot(); - let deactivated = Inactive::::get(); + let deactivated = Deactivated::::get(); if pot != deactivated { - match (pot > deactivated, pot.max(deactivated) - pot.min(deactivated)) { - (true, delta) => T::Currency::deactivate(delta), - (false, delta) => T::Currency::reactivate(delta), - } - Inactive::::put(&pot); + T::Currency::reactivate(deactivated); + T::Currency::deactivate(pot); + Deactivated::::put(&pot); + Self::deposit_event(Event::::UpdatedInactive { + reactivated: deactivated, + deactivated: pot, + }); } // Check to see if we should spend some funds! 
diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index d7b326164b7d3..f1f8fc8b59830 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -50,6 +50,34 @@ pub use rational::{Rational128, RationalInfinite}; use sp_std::{cmp::Ordering, fmt::Debug, prelude::*}; use traits::{BaseArithmetic, One, SaturatedConversion, Unsigned, Zero}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; + +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + +/// Arithmetic errors. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub enum ArithmeticError { + /// Underflow. + Underflow, + /// Overflow. + Overflow, + /// Division by zero. + DivisionByZero, +} + +impl From for &'static str { + fn from(e: ArithmeticError) -> &'static str { + match e { + ArithmeticError::Underflow => "An underflow would occur", + ArithmeticError::Overflow => "An overflow would occur", + ArithmeticError::DivisionByZero => "Division by zero", + } + } +} + /// Trait for comparing two numbers with an threshold. /// /// Returns: diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index 466d5696c7136..7fa64d28669d4 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -18,6 +18,11 @@ //! Primitive traits for the runtime arithmetic. use codec::HasCompact; +pub use ensure::{ + Ensure, EnsureAdd, EnsureAddAssign, EnsureDiv, EnsureDivAssign, EnsureFixedPointNumber, + EnsureFrom, EnsureInto, EnsureMul, EnsureMulAssign, EnsureOp, EnsureOpAssign, EnsureSub, + EnsureSubAssign, +}; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ checked_pow, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, @@ -302,3 +307,527 @@ pub trait SaturatedConversion { } } impl SaturatedConversion for T {} + +/// Arithmetic operations with safe error handling. 
+/// +/// This module provide a readable way to do safe arithmetics, turning this: +/// +/// ``` +/// # use sp_arithmetic::{traits::EnsureSub, ArithmeticError}; +/// # fn foo() -> Result<(), ArithmeticError> { +/// # let mut my_value: i32 = 1; +/// # let other_value: i32 = 1; +/// my_value = my_value.checked_sub(other_value).ok_or(ArithmeticError::Overflow)?; +/// # Ok(()) +/// # } +/// ``` +/// +/// into this: +/// +/// ``` +/// # use sp_arithmetic::{traits::EnsureSubAssign, ArithmeticError}; +/// # fn foo() -> Result<(), ArithmeticError> { +/// # let mut my_value: i32 = 1; +/// # let other_value: i32 = 1; +/// my_value.ensure_sub_assign(other_value)?; +/// # Ok(()) +/// # } +/// ``` +/// +/// choosing the correct [`ArithmeticError`](crate::ArithmeticError) it should return in case of +/// fail. +/// +/// The *EnsureOps* family functions follows the same behavior as *CheckedOps* but +/// returning an [`ArithmeticError`](crate::ArithmeticError) instead of `None`. +mod ensure { + use super::{CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Zero}; + use crate::{ArithmeticError, FixedPointNumber, FixedPointOperand}; + + /// Performs addition that returns [`ArithmeticError`] instead of wrapping around on overflow. + pub trait EnsureAdd: CheckedAdd + PartialOrd + Zero + Copy { + /// Adds two numbers, checking for overflow. + /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// Similar to [`CheckedAdd::checked_add()`] but returning an [`ArithmeticError`] error. 
+ /// + /// ``` + /// use sp_arithmetic::{traits::EnsureAdd, ArithmeticError}; + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// u32::MAX.ensure_add(1)?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// i32::MIN.ensure_add(-1)?; + /// Ok(()) + /// } + /// + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_add(self, v: Self) -> Result { + self.checked_add(&v).ok_or_else(|| error::equivalent(v)) + } + } + + /// Performs subtraction that returns [`ArithmeticError`] instead of wrapping around on + /// underflow. + pub trait EnsureSub: CheckedSub + PartialOrd + Zero + Copy { + /// Subtracts two numbers, checking for overflow. + /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// Similar to [`CheckedSub::checked_sub()`] but returning an [`ArithmeticError`] error. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureSub, ArithmeticError}; + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// 0u32.ensure_sub(1)?; + /// Ok(()) + /// } + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// i32::MAX.ensure_sub(-1)?; + /// Ok(()) + /// } + /// + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// ``` + fn ensure_sub(self, v: Self) -> Result { + self.checked_sub(&v).ok_or_else(|| error::inverse(v)) + } + } + + /// Performs multiplication that returns [`ArithmeticError`] instead of wrapping around on + /// overflow. + pub trait EnsureMul: CheckedMul + PartialOrd + Zero + Copy { + /// Multiplies two numbers, checking for overflow. + /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// Similar to [`CheckedMul::checked_mul()`] but returning an [`ArithmeticError`] error. 
+ /// + /// ``` + /// use sp_arithmetic::{traits::EnsureMul, ArithmeticError}; + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// u32::MAX.ensure_mul(2)?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// i32::MAX.ensure_mul(-2)?; + /// Ok(()) + /// } + /// + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_mul(self, v: Self) -> Result { + self.checked_mul(&v).ok_or_else(|| error::multiplication(self, v)) + } + } + + /// Performs division that returns [`ArithmeticError`] instead of wrapping around on overflow. + pub trait EnsureDiv: CheckedDiv + PartialOrd + Zero + Copy { + /// Divides two numbers, checking for overflow. + /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// Similar to [`CheckedDiv::checked_div()`] but returning an [`ArithmeticError`] error. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureDiv, ArithmeticError}; + /// + /// fn extrinsic_zero() -> Result<(), ArithmeticError> { + /// 1.ensure_div(0)?; + /// Ok(()) + /// } + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// i64::MIN.ensure_div(-1)?; + /// Ok(()) + /// } + /// + /// assert_eq!(extrinsic_zero(), Err(ArithmeticError::DivisionByZero)); + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// ``` + fn ensure_div(self, v: Self) -> Result { + self.checked_div(&v).ok_or_else(|| error::division(self, v)) + } + } + + impl EnsureAdd for T {} + impl EnsureSub for T {} + impl EnsureMul for T {} + impl EnsureDiv for T {} + + /// Meta trait that supports all immutable arithmetic `Ensure*` operations + pub trait EnsureOp: EnsureAdd + EnsureSub + EnsureMul + EnsureDiv {} + impl EnsureOp for T {} + + /// Performs self addition that returns [`ArithmeticError`] instead of wrapping around on + /// overflow. 
+ pub trait EnsureAddAssign: EnsureAdd { + /// Adds two numbers overwriting the left hand one, checking for overflow. + /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureAddAssign, ArithmeticError}; + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// let mut max = u32::MAX; + /// max.ensure_add_assign(1)?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// let mut max = i32::MIN; + /// max.ensure_add_assign(-1)?; + /// Ok(()) + /// } + /// + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_add_assign(&mut self, v: Self) -> Result<(), ArithmeticError> { + *self = self.ensure_add(v)?; + Ok(()) + } + } + + /// Performs self subtraction that returns [`ArithmeticError`] instead of wrapping around on + /// underflow. + pub trait EnsureSubAssign: EnsureSub { + /// Subtracts two numbers overwriting the left hand one, checking for overflow. + /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureSubAssign, ArithmeticError}; + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// let mut zero: u32 = 0; + /// zero.ensure_sub_assign(1)?; + /// Ok(()) + /// } + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// let mut zero = i32::MAX; + /// zero.ensure_sub_assign(-1)?; + /// Ok(()) + /// } + /// + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// ``` + fn ensure_sub_assign(&mut self, v: Self) -> Result<(), ArithmeticError> { + *self = self.ensure_sub(v)?; + Ok(()) + } + } + + /// Performs self multiplication that returns [`ArithmeticError`] instead of wrapping around on + /// overflow. + pub trait EnsureMulAssign: EnsureMul { + /// Multiplies two numbers overwriting the left hand one, checking for overflow. 
+ /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureMulAssign, ArithmeticError}; + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// let mut max = u32::MAX; + /// max.ensure_mul_assign(2)?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// let mut max = i32::MAX; + /// max.ensure_mul_assign(-2)?; + /// Ok(()) + /// } + /// + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_mul_assign(&mut self, v: Self) -> Result<(), ArithmeticError> { + *self = self.ensure_mul(v)?; + Ok(()) + } + } + + /// Performs self division that returns [`ArithmeticError`] instead of wrapping around on + /// overflow. + pub trait EnsureDivAssign: EnsureDiv { + /// Divides two numbers overwriting the left hand one, checking for overflow. + /// + /// If it fails, [`ArithmeticError`] is returned. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureDivAssign, ArithmeticError, FixedI64}; + /// + /// fn extrinsic_zero() -> Result<(), ArithmeticError> { + /// let mut one = 1; + /// one.ensure_div_assign(0)?; + /// Ok(()) + /// } + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// let mut min = FixedI64::from(i64::MIN); + /// min.ensure_div_assign(FixedI64::from(-1))?; + /// Ok(()) + /// } + /// + /// assert_eq!(extrinsic_zero(), Err(ArithmeticError::DivisionByZero)); + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// ``` + fn ensure_div_assign(&mut self, v: Self) -> Result<(), ArithmeticError> { + *self = self.ensure_div(v)?; + Ok(()) + } + } + + impl EnsureAddAssign for T {} + impl EnsureSubAssign for T {} + impl EnsureMulAssign for T {} + impl EnsureDivAssign for T {} + + /// Meta trait that supports all assigned arithmetic `Ensure*` operations + pub trait EnsureOpAssign: + EnsureAddAssign + EnsureSubAssign + EnsureMulAssign + EnsureDivAssign + { + } + impl 
EnsureOpAssign + for T + { + } + + /// Meta trait that supports all arithmetic operations + pub trait Ensure: EnsureOp + EnsureOpAssign {} + impl Ensure for T {} + + /// Extends [`FixedPointNumber`] with the Ensure family functions. + pub trait EnsureFixedPointNumber: FixedPointNumber { + /// Creates `self` from a rational number. Equal to `n / d`. + /// + /// Returns [`ArithmeticError`] if `d == 0` or `n / d` exceeds accuracy. + /// + /// Similar to [`FixedPointNumber::checked_from_rational()`] but returning an + /// [`ArithmeticError`] error. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureFixedPointNumber, ArithmeticError, FixedI64}; + /// + /// fn extrinsic_zero() -> Result<(), ArithmeticError> { + /// FixedI64::ensure_from_rational(1, 0)?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// FixedI64::ensure_from_rational(i64::MAX, -1)?; + /// Ok(()) + /// } + /// + /// assert_eq!(extrinsic_zero(), Err(ArithmeticError::DivisionByZero)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_from_rational( + n: N, + d: D, + ) -> Result { + ::checked_from_rational(n, d) + .ok_or_else(|| error::division(n, d)) + } + + /// Ensure multiplication for integer type `N`. Equal to `self * n`. + /// + /// Returns [`ArithmeticError`] if the result does not fit in `N`. + /// + /// Similar to [`FixedPointNumber::checked_mul_int()`] but returning an [`ArithmeticError`] + /// error. 
+ /// + /// ``` + /// use sp_arithmetic::{traits::EnsureFixedPointNumber, ArithmeticError, FixedI64}; + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// FixedI64::from(i64::MAX).ensure_mul_int(2)?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// FixedI64::from(i64::MAX).ensure_mul_int(-2)?; + /// Ok(()) + /// } + /// + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_mul_int(self, n: N) -> Result { + self.checked_mul_int(n).ok_or_else(|| error::multiplication(self, n)) + } + + /// Ensure division for integer type `N`. Equal to `self / d`. + /// + /// Returns [`ArithmeticError`] if the result does not fit in `N` or `d == 0`. + /// + /// Similar to [`FixedPointNumber::checked_div_int()`] but returning an [`ArithmeticError`] + /// error. + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureFixedPointNumber, ArithmeticError, FixedI64}; + /// + /// fn extrinsic_zero() -> Result<(), ArithmeticError> { + /// FixedI64::from(1).ensure_div_int(0)?; + /// Ok(()) + /// } + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// FixedI64::from(i64::MIN).ensure_div_int(-1)?; + /// Ok(()) + /// } + /// + /// assert_eq!(extrinsic_zero(), Err(ArithmeticError::DivisionByZero)); + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// ``` + fn ensure_div_int(self, d: D) -> Result { + self.checked_div_int(d).ok_or_else(|| error::division(self, d)) + } + } + + impl EnsureFixedPointNumber for T {} + + /// Similar to [`TryFrom`] but returning an [`ArithmeticError`] error. + pub trait EnsureFrom: + TryFrom + PartialOrd + Zero + Copy + { + /// Performs the conversion returning an [`ArithmeticError`] if fails. + /// + /// Similar to [`TryFrom::try_from()`] but returning an [`ArithmeticError`] error. 
+ /// + /// ``` + /// use sp_arithmetic::{traits::EnsureFrom, ArithmeticError}; + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// let byte: u8 = u8::ensure_from(256u16)?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// let byte: i8 = i8::ensure_from(-129i16)?; + /// Ok(()) + /// } + /// + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_from(other: T) -> Result { + Self::try_from(other).map_err(|_| error::equivalent(other)) + } + } + + /// Similar to [`TryInto`] but returning an [`ArithmeticError`] error. + pub trait EnsureInto: + TryInto + PartialOrd + Zero + Copy + { + /// Performs the conversion returning an [`ArithmeticError`] if fails. + /// + /// Similar to [`TryInto::try_into()`] but returning an [`ArithmeticError`] error + /// + /// ``` + /// use sp_arithmetic::{traits::EnsureInto, ArithmeticError}; + /// + /// fn overflow() -> Result<(), ArithmeticError> { + /// let byte: u8 = 256u16.ensure_into()?; + /// Ok(()) + /// } + /// + /// fn underflow() -> Result<(), ArithmeticError> { + /// let byte: i8 = (-129i16).ensure_into()?; + /// Ok(()) + /// } + /// + /// assert_eq!(overflow(), Err(ArithmeticError::Overflow)); + /// assert_eq!(underflow(), Err(ArithmeticError::Underflow)); + /// ``` + fn ensure_into(self) -> Result { + self.try_into().map_err(|_| error::equivalent(self)) + } + } + + impl + PartialOrd + Zero + Copy, S: PartialOrd + Zero + Copy> EnsureFrom for T {} + impl + PartialOrd + Zero + Copy, S: PartialOrd + Zero + Copy> EnsureInto for T {} + + mod error { + use super::{ArithmeticError, Zero}; + + #[derive(PartialEq)] + enum Signum { + Negative, + Positive, + } + + impl From for Signum { + fn from(value: T) -> Self { + if value < Zero::zero() { + Signum::Negative + } else { + Signum::Positive + } + } + } + + impl sp_std::ops::Mul for Signum { + type Output = Self; + + fn mul(self, rhs: Self) -> Self { 
+ if self != rhs { + Signum::Negative + } else { + Signum::Positive + } + } + } + + pub fn equivalent(r: R) -> ArithmeticError { + match Signum::from(r) { + Signum::Negative => ArithmeticError::Underflow, + Signum::Positive => ArithmeticError::Overflow, + } + } + + pub fn inverse(r: R) -> ArithmeticError { + match Signum::from(r) { + Signum::Negative => ArithmeticError::Overflow, + Signum::Positive => ArithmeticError::Underflow, + } + } + + pub fn multiplication( + l: L, + r: R, + ) -> ArithmeticError { + match Signum::from(l) * Signum::from(r) { + Signum::Negative => ArithmeticError::Underflow, + Signum::Positive => ArithmeticError::Overflow, + } + } + + pub fn division( + n: N, + d: D, + ) -> ArithmeticError { + if d.is_zero() { + ArithmeticError::DivisionByZero + } else { + multiplication(n, d) + } + } + } +} diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 621ab859b914f..cb44afcb8d4e4 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -360,6 +360,25 @@ pub struct Epoch { pub config: BabeEpochConfiguration, } +/// Returns the epoch index the given slot belongs to. +pub fn epoch_index(slot: Slot, genesis_slot: Slot, epoch_duration: u64) -> u64 { + *slot.saturating_sub(genesis_slot) / epoch_duration +} + +/// Returns the first slot at the given epoch index. +pub fn epoch_start_slot(epoch_index: u64, genesis_slot: Slot, epoch_duration: u64) -> Slot { + // (epoch_index * epoch_duration) + genesis_slot + + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + + epoch_index + .checked_mul(epoch_duration) + .and_then(|slot| slot.checked_add(*genesis_slot)) + .expect(PROOF) + .into() +} + sp_api::decl_runtime_apis! { /// API necessary for block authorship with BABE. 
#[api_version(2)] diff --git a/primitives/consensus/common/src/block_validation.rs b/primitives/consensus/common/src/block_validation.rs index 71f3a80b27a64..1dc42104913cb 100644 --- a/primitives/consensus/common/src/block_validation.rs +++ b/primitives/consensus/common/src/block_validation.rs @@ -19,18 +19,18 @@ use crate::BlockStatus; use futures::FutureExt as _; -use sp_runtime::{generic::BlockId, traits::Block}; +use sp_runtime::traits::Block; use std::{error::Error, future::Future, pin::Pin, sync::Arc}; /// A type which provides access to chain information. pub trait Chain { - /// Retrieve the status of the block denoted by the given [`BlockId`]. - fn block_status(&self, id: &BlockId) -> Result>; + /// Retrieve the status of the block denoted by the given [`Block::Hash`]. + fn block_status(&self, hash: B::Hash) -> Result>; } impl, B: Block> Chain for Arc { - fn block_status(&self, id: &BlockId) -> Result> { - (&**self).block_status(id) + fn block_status(&self, hash: B::Hash) -> Result> { + (&**self).block_status(hash) } } @@ -60,7 +60,7 @@ pub trait BlockAnnounceValidator { /// Returning [`Validation::Failure`] will lead to a decrease of the /// peers reputation as it sent us invalid data. /// - /// The returned future should only resolve to an error iff there was an internal error + /// The returned future should only resolve to an error if there was an internal error /// validating the block announcement. If the block announcement itself is invalid, this should /// *always* return [`Validation::Failure`]. fn validate( diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index e94efda86aa03..96fe7d2487f48 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -93,9 +93,9 @@ pub use sp_arithmetic::biguint; pub use sp_arithmetic::helpers_128bit; /// Re-export top-level arithmetic stuff. 
pub use sp_arithmetic::{ - traits::SaturatedConversion, FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, - FixedU128, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, - Rounding, UpperOf, + traits::SaturatedConversion, ArithmeticError, FixedI128, FixedI64, FixedPointNumber, + FixedPointOperand, FixedU128, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, + Perquintill, Rational128, Rounding, UpperOf, }; pub use either::Either; @@ -641,28 +641,6 @@ impl From for DispatchError { } } -/// Arithmetic errors. -#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub enum ArithmeticError { - /// Underflow. - Underflow, - /// Overflow. - Overflow, - /// Division by zero. - DivisionByZero, -} - -impl From for &'static str { - fn from(e: ArithmeticError) -> &'static str { - match e { - ArithmeticError::Underflow => "An underflow would occur", - ArithmeticError::Overflow => "An overflow would occur", - ArithmeticError::DivisionByZero => "Division by zero", - } - } -} - impl From for DispatchError { fn from(e: ArithmeticError) -> DispatchError { Self::Arithmetic(e) diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 375475141b818..8978cdb11c0c6 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -33,8 +33,10 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_application_crypto::AppKey; pub use sp_arithmetic::traits::{ AtLeast32Bit, AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedShl, - CheckedShr, CheckedSub, IntegerSquareRoot, One, SaturatedConversion, Saturating, - UniqueSaturatedFrom, UniqueSaturatedInto, Zero, + CheckedShr, CheckedSub, Ensure, EnsureAdd, EnsureAddAssign, EnsureDiv, EnsureDivAssign, + EnsureFixedPointNumber, EnsureFrom, EnsureInto, EnsureMul, EnsureMulAssign, EnsureOp, + EnsureOpAssign, EnsureSub, EnsureSubAssign, 
IntegerSquareRoot, One, SaturatedConversion, + Saturating, UniqueSaturatedFrom, UniqueSaturatedInto, Zero, }; use sp_core::{self, storage::StateVersion, Hasher, RuntimeDebug, TypeId}; #[doc(hidden)] diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 578158d8a2356..c69d95725eb14 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -94,9 +94,9 @@ where let block_num = BlockId::Number(i.into()); let parent_num = BlockId::Number(((i - 1) as u32).into()); let consumed = self.consumed_weight(&block_num)?; + let hash = self.client.expect_block_hash_from_id(&block_num)?; - let block = - self.client.block(&block_num)?.ok_or(format!("Block {} not found", block_num))?; + let block = self.client.block(hash)?.ok_or(format!("Block {} not found", block_num))?; let block = self.unsealed(block.block); let took = self.measure_block(&block, &parent_num)?; diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index c4cc2e757d90f..ed0f25cae7a36 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -30,11 +30,12 @@ sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" } frame-try-runtime = { optional = true, path = "../../../../frame/try-runtime" } substrate-rpc-client = { path = "../../rpc/client" } -parity-scale-codec = "3.0.0" -hex = "0.4.3" clap = { version = "4.0.9", features = ["derive"] } +hex = { version = "0.4.3", default-features = false } log = "0.4.17" +parity-scale-codec = "3.0.0" serde = "1.0.136" +serde_json = "1.0.85" zstd = { version = "0.11.2", default-features = false } [dev-dependencies] diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 80d34002fa771..ee5e21af5d3ba 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ 
b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -134,6 +134,7 @@ where "TryRuntime_execute_block", &payload, full_extensions(), + shared.export_proof, )?; Ok(()) diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 4eb3b3a8f35a9..e4f166fe7ada7 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -150,6 +150,10 @@ where "TryRuntime_execute_block", (block, command.state_root_check, command.try_state.clone()).encode().as_ref(), full_extensions(), + shared + .export_proof + .as_ref() + .map(|path| path.as_path().join(&format!("{}.json", number))), ); if let Err(why) = result { diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 80fb5d31f71a9..2b2800d505864 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -59,6 +59,7 @@ where "TryRuntime_on_runtime_upgrade", command.checks.encode().as_ref(), Default::default(), // we don't really need any extensions here. + shared.export_proof, )?; let (weight, total_weight) = <(Weight, Weight) as Decode>::decode(&mut &*encoded_result) diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 47a9dfa3f6544..aac2ad4238431 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -523,6 +523,12 @@ pub struct SharedParams { #[arg(long)] pub heap_pages: Option, + /// Path to a file to export the storage proof into (as a JSON). + /// If several blocks are executed, the path is interpreted as a folder + /// where one file per block will be written (named `{block_number}-{block_hash}`). + #[clap(long)] + pub export_proof: Option, + /// Overwrite the `state_version`. 
/// /// Otherwise `remote-externalities` will automatically set the correct state version. @@ -863,6 +869,7 @@ pub(crate) fn state_machine_call_with_proof, ) -> sc_cli::Result<(OverlayedChanges, Vec)> { use parity_scale_codec::Encode; @@ -891,6 +898,32 @@ pub(crate) fn state_machine_call_with_proof &'static str { log::error!(target: LOG_TARGET, "rpc error: {:?}", error); "rpc error." } + +/// Converts a [`sp_state_machine::StorageProof`] into a JSON string. +fn storage_proof_to_raw_json(storage_proof: &sp_state_machine::StorageProof) -> String { + serde_json::Value::Object( + storage_proof + .to_memory_db::() + .drain() + .iter() + .map(|(key, (value, _n))| { + ( + format!("0x{}", hex::encode(key.as_bytes())), + serde_json::Value::String(format!("0x{}", hex::encode(value))), + ) + }) + .collect(), + ) + .to_string() +}